repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
alehenaff/textfielfdtemplate | templatefield/filters.py | from templatefield.models import Substitution
def simplesubstitution(value, substitution):
    """Jinja2 filter: replace ``value`` using the Substitution table.

    Looks up the row with name=``substitution`` and key=``value`` and returns
    its ``value`` field; returns None when no row matches (same implicit-None
    behavior as before).
    """
    # Single query instead of exists() + get() (two DB round-trips).
    match = Substitution.objects.filter(name=substitution, key=value).first()
    if match is not None:
        return match.value
    return None
|
alehenaff/textfielfdtemplate | templatefield/models.py | from django.db import models
from django.template import Template, Context
from django.template.base import VariableNode
import jinja2
from jinja2 import Environment, meta
import json
# Create your models here.
class TextFieldTemplate(models.Model):
    """Stores a template string plus a JSON context and renders them.

    template: Django/Jinja2 template source
    context: JSON-encoded dict of template variables
    """
    template = models.TextField()
    context = models.TextField()  # JSONField candidate

    def rendertemplate(self):
        """Render with the Django engine.

        Returns the rendered string, 'error' for a bad template, or
        'context error' for bad JSON / a render failure.
        """
        try:
            t = Template(self.template)
        except Exception:  # narrowed from a bare except
            return "error"
        try:
            c = Context(json.loads(self.context))
            return t.render(c)
        except Exception:  # bad JSON or render failure
            return "context error"

    def get_variable_nodes(self):
        """Return the template's VariableNode list, or 'error' on bad syntax."""
        try:
            t = Template(self.template)
            return t.nodelist.get_nodes_by_type(VariableNode)
        except Exception:  # narrowed from a bare except
            return "error"

    def get_undefined_variables(self):
        """Return the set of variable names Jinja2 finds undeclared.

        On failure the exception object itself is returned to the caller
        (pre-existing contract, preserved).
        """
        try:
            env = Environment()
            parsed_context = env.parse(self.template)
            return meta.find_undeclared_variables(parsed_context)
        except Exception as ex:
            return ex

    def rendertemplatejinja(self, filters=None):
        """Render with Jinja2.

        filters: optional iterable of (name, callable) pairs registered on the
        environment before rendering. Returns the rendered string, or the
        exception object on failure (pre-existing contract, preserved).
        """
        try:
            env = jinja2.Environment()
            if filters:
                for filtername, filterfunc in filters:
                    env.filters[filtername] = filterfunc
            tem = env.from_string(self.template)
            return tem.render(json.loads(self.context))
        except Exception as ex:
            return ex
class Substitution(models.Model):
    # A named key -> value replacement; consumed by the simplesubstitution
    # template filter (lookup by (name, key), returns value).
    name = models.CharField(max_length=70)    # substitution table name
    key = models.CharField(max_length=100)    # input value to match
    value = models.CharField(max_length=100)  # replacement value
    class Meta:
        # NOTE(review): the double parens make this a single 3-tuple, not a
        # tuple of tuples — Django accepts both forms here.
        unique_together = (('name','key','value'))
alehenaff/textfielfdtemplate | templatefield/tests.py | from django.test import TestCase
from templatefield.models import TextFieldTemplate, Substitution
from templatefield.filters import simplesubstitution
# Create your tests here.
class TextFieldTemplateTest(TestCase):
    """Exercises Django/Jinja2 rendering, undefined-variable detection and
    the custom simplesubstitution filter."""
    def setUp(self):
        # Fixture 1: renderable by both the Django and Jinja2 engines.
        TextFieldTemplate.objects.create(template='Bonjour {{client.name}}, {{client.address}}',
                                         context='{"client":{"name":"toto","address":"Paris"}}')
        # Fixture 2: uses the custom Jinja2 simplesubstitution filter.
        TextFieldTemplate.objects.create(template='{{domaine.name|simplesubstitution("aerodom")}}',
                                         context='{"domaine":{"name":"atlantique"}}')
        # Row the filter should resolve 'atlantique' -> 'z' against.
        Substitution.objects.create(name='aerodom', key='atlantique', value='z')
    def test_simple_render(self):
        # Django engine render of fixture 1.
        a = TextFieldTemplate.objects.first()
        self.assertEqual(a.rendertemplate(), 'Bonjour toto, Paris')
    def test_jinja_render(self):
        # Jinja2 engine must produce the same output for fixture 1.
        a = TextFieldTemplate.objects.first()
        self.assertEqual(a.rendertemplatejinja(), 'Bonjour toto, Paris')
    def test_undefined_variables(self):
        # Only the top-level name 'client' is undeclared in fixture 1.
        a = TextFieldTemplate.objects.first()
        self.assertEqual(a.get_undefined_variables(), {'client'})
    def test_filter(self):
        # Fixture 2 rendered with the filter registered resolves to 'z'.
        a = TextFieldTemplate.objects.last()
        self.assertEqual(a.rendertemplatejinja(filters=[('simplesubstitution', simplesubstitution)]), 'z')
|
TangLeeee/test | common.py | from flask import Flask,jsonify
app = Flask(__name__)
response_index = {
'status' : 0,
'messages' : [
{'id':1, 'img':'https://uploadbeta.com/api/pictures/random/'},
{'id':2, 'img':'https://uploadbeta.com/api/pictures/random/?key=BingEverydayWallpaperPicture'},
{'id':3, 'img':'http://acg.bakayun.cn/randbg.php?Type=&t=&https='},
]
}
response_newslist = {
'status' : 0,
'messages' : [
{'id':1, 'title':'新闻1', 'add_time':'2019-05-30 18:08:33', 'zhaiyao':'新闻摘要1','click':8},
{'id':2, 'title':'新闻2', 'add_time':'2019-05-30 17:39:33', 'zhaiyao':'新闻摘要2','click':6},
{'id':3, 'title':'新闻3', 'add_time':'2019-05-30 10:33:39', 'zhaiyao':'新闻摘要3','click':4},
]
}
response_newsinfo = {
'status' : 0,
'messages' : [
{'id': 1, 'title': '猛龙胜勇士1-0', 'add_time': '2019-05-31', 'content': ' 库里开局不久造成伦纳德投篮犯规,2罚全中,完成2019年总决赛首次得分,但猛龙很快由丹尼-格林三分还以颜色。双方一番缠斗后,小加利用勇士放空的机会,连续在外线投中三分,帮助猛龙18-11取得领先。'
' 关键时刻,库里利用汤普森无球掩护,投中本场比赛第2个三分,这是他在总决赛投中的第100个三分,成为NBA历史上首个完成这个成就的球员。库里其后又在左侧底角投中三分,率队打出一个8-0攻击波,勇士19-18反超比分。'
' 勇士第三节开局三分投得不错,库里和汤普森先后投中三分,帮助勇士61-67迫近比分。在随后的比赛中,西亚卡姆展现出出色的进攻天赋,里突外投不断得分,帮助猛龙79-68重新取得11分领先优势。'
' 西亚卡姆第三节6投全中,单节得到14分,将个人得分提高到26分,伦纳德第三节5投2中,罚球6罚5中单节得到10分,将个人得分提高到18分。勇士这边,库里14投6中,三分8投4中得到26分3篮板2助攻。'
' 第四节进行到7分32秒,猛龙再次发动快攻反击,西亚卡姆突破分球,助攻空位的丹尼-格林投中三分,猛龙100-88将领先优势扩大到12分。勇士请求暂停后连得4分,但伦纳德错位单打三分得手,猛龙重新取得10分以上的优势并拿下比赛。', 'click': 8},
{'id': 2, 'title': '新闻2', 'add_time': '2019-05-30', 'content': '详细内容2', 'click': 4},
{'id': 3, 'title': '新闻3', 'add_time': '2019-05-30', 'content': '详细内容3', 'click': 6},
]
}
response_comment_1 = {
'messages' : [
{'user_name' : '匿名用户', 'addtime': '2019-05-31 13:13:00', 'content': '评论一下'},
{'user_name' : '匿名用户', 'addtime': '2019-05-31 13:14:00', 'content': '哈哈哈'},
{'user_name' : '匿名用户', 'addtime': '2019-05-31 13:15:00', 'content': '你好'},
{'user_name' : '匿名用户', 'addtime': '2019-05-31 13:16:00', 'content': '我的朋友'},
{'user_name' : '匿名用户', 'addtime': '2019-05-31 13:19:00', 'content': '123'}
]
}
response_comment_2 = {
'messages' : [
{'user_name' : '匿名用户', 'addtime': '2019-05-31 13:13:00', 'content': '北京'},
{'user_name' : '匿名用户', 'addtime': '2019-05-31 13:14:00', 'content': '上海'},
{'user_name' : '匿名用户', 'addtime': '2019-05-31 13:15:00', 'content': '广州'},
{'user_name' : '匿名用户', 'addtime': '2019-05-31 13:16:00', 'content': '深圳'},
{'user_name' : '匿名用户', 'addtime': '2019-05-31 13:19:00', 'content': '456'}
]
}
@app.route('/', methods=['GET'])
def data():
    """Index endpoint: serve the home-page image carousel list as JSON."""
    payload = jsonify(response_index)
    # Set the CORS header so browser front-ends on other origins can fetch.
    payload.headers['Access-Control-Allow-Origin'] = '*'
    return payload
@app.route('/api/getnewslist', methods=['GET'])
def data_news():
    """Return the news summary list as JSON."""
    payload = jsonify(response_newslist)
    # CORS header for cross-origin front-end requests.
    payload.headers['Access-Control-Allow-Origin'] = '*'
    return payload
@app.route('/api/getnew/<int:newid>', methods=['GET'])
def data_info(newid):
    """Return the detail record for 1-based news id ``newid`` as JSON."""
    messages = response_newsinfo['messages']
    if 1 <= newid <= len(messages):
        resp = jsonify({'status':0, 'message':messages[newid - 1]})
    else:
        # Out-of-range ids previously raised IndexError (HTTP 500); now a
        # well-formed not-found payload is returned instead.
        resp = jsonify({'status':1, 'message':'not found'})
    resp.headers['Access-Control-Allow-Origin'] = '*'
    return resp
@app.route('/api/getcomments/<int:pageIndex>', methods=['GET'])
def news_info(pageIndex):
    """Return the comment page for ``pageIndex`` (1 or 2).

    Any other page yields an empty message list; the original fell through
    and returned None, which made Flask raise a 500.
    """
    pages = {1: response_comment_1, 2: response_comment_2}
    resp = jsonify(pages.get(pageIndex, {'messages': []}))
    resp.headers['Access-Control-Allow-Origin'] = '*'
    return resp
if __name__ == '__main__':
    # app.run(host='0.0.0.0')  # bind all interfaces so external hosts can reach it
    # app.run(debug=True)      # auto-reload on edits; not for production use
    app.run()
|
icanswim/cosmosis | model.py | <gh_stars>0
from abc import ABC, abstractmethod
from math import sqrt
from torch import nn, cat, squeeze, softmax, Tensor, flatten, sigmoid, max, mean
from torch.nn import functional as F
import torchvision.models as torchvisionmodels
def tv_model(model_name='resnet18', tv_params=None, in_channels=3):
    """Instantiate a torchvision model, optionally adapting its stem conv.

    model_name: attribute name on torchvision.models (e.g. 'resnet18')
    tv_params: kwargs forwarded to the model constructor (None -> {})
    in_channels: input channel count; for the listed resnet variants the
        first conv is rebuilt so non-RGB inputs work.
    """
    # None sentinel instead of a shared mutable default dict.
    tv_params = {} if tv_params is None else tv_params
    launcher = getattr(torchvisionmodels, model_name)
    model = launcher(**tv_params)
    if model_name in ['resnet18','resnet34','resnet50','wide_resnet50_2','resnext50_32x4d']:
        # Replace the stock 3-channel stem with one matching in_channels.
        model.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=64,
                                kernel_size=7, stride=2, padding=3, bias=False)
    print('TorchVision model {} loaded...'.format(model_name))
    return model
def logsumexp_2d(tensor):
    """Numerically stable log-sum-exp over the flattened spatial dims.

    tensor: (B, C, *spatial) -> (B, C, 1).
    """
    tensor_flatten = tensor.view(tensor.size(0), tensor.size(1), -1)
    # The module never imports `torch` itself (only names from it), so the
    # original `torch.max(...)` raised NameError; use the tensor method.
    s, _ = tensor_flatten.max(dim=2, keepdim=True)
    # Subtract the max before exponentiating to avoid overflow.
    outputs = s + (tensor_flatten - s).exp().sum(dim=2, keepdim=True).log()
    return outputs
class Flatten(nn.Module):
    """Collapse every dim after the batch dim into one: (B, ...) -> (B, -1)."""
    def forward(self, x):
        batch_size = x.size(0)
        return x.view(batch_size, -1)
class BasicConv(nn.Module):
    """Conv2d -> optional BatchNorm2d -> optional ReLU building block."""
    def __init__(self, in_planes, out_planes, kernel_size,
                 stride=1, padding=0, dilation=1, groups=1,
                 relu=True, bn=True, bias=False):
        super().__init__()
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                              stride=stride, padding=padding, dilation=dilation,
                              groups=groups, bias=bias)
        # bias defaults off because BatchNorm supplies the shift term.
        self.bn = nn.BatchNorm2d(out_planes,eps=1e-5, momentum=0.01, affine=True) if bn else None
        self.relu = nn.ReLU() if relu else None
    def forward(self, x):
        x = self.conv(x)
        if self.bn is not None:
            x = self.bn(x)
        if self.relu is not None:
            x = self.relu(x)
        return x
class FFUnit(nn.Module):
    """Linear -> activation -> BatchNorm1d [-> Dropout] feed-forward unit.

    D_in/D_out: feature sizes
    dropout: False, or a drop probability for an appended Dropout layer
    activation: an nn.Module class, instantiated here
    """
    def __init__(self, D_in, D_out, dropout=False, activation=nn.SELU):
        # The original never called super().__init__(), which breaks every
        # nn.Module registration; it also referenced the undefined names
        # `bam` and `drop`.
        super().__init__()
        ffu = []
        ffu.append(nn.Linear(D_in, D_out))
        ffu.append(activation())
        ffu.append(nn.BatchNorm1d(D_out))
        if dropout:
            # `dropout` doubles as the drop probability.
            ffu.append(nn.Dropout(dropout))
        self.layers = nn.Sequential(*ffu)
    def forward(self, x):
        return self.layers(x)
class ConvUnit(nn.Module):
    """Conv -> BN [-> activation] [-> MaxPool] -> Conv [-> CBAM] [-> Dropout].

    Mirrors CModel.conv_unit as a standalone module.
    """
    def __init__(self, in_channels, out_channels, kernel_size=3,
                 stride=1, padding=1, dilation=1, groups=1, bias=False,
                 activation=None, cbam=False, dropout=False, pool=False):
        # Fixes vs original: super().__init__() was missing; `pool` and
        # `dropout` were undefined names (now parameters); `groups` was
        # accepted but never passed to the convs; the pool branch halved
        # out_channels with float division, mismatching the first conv's
        # output channels — the second conv now keeps the channel count,
        # consistent with CModel.conv_unit.
        super().__init__()
        conv = []
        conv.append(nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
                              stride=stride, padding=padding, dilation=dilation,
                              groups=groups, bias=bias))
        conv.append(nn.BatchNorm2d(out_channels))
        if activation: conv.append(activation())
        if pool:
            conv.append(nn.MaxPool2d(2, stride=2, padding=1))
        conv.append(nn.Conv2d(out_channels, out_channels, kernel_size=kernel_size,
                              stride=stride, padding=padding, dilation=dilation,
                              groups=groups, bias=bias))
        if cbam: conv.append(CBAM(out_channels))
        if dropout: conv.append(nn.Dropout(p=dropout))
        self.layers = nn.Sequential(*conv)
    def forward(self, x):
        return self.layers(x)
class Flatten(nn.Module):
    # NOTE(review): duplicate definition — an identical Flatten exists earlier
    # in this module; this redefinition shadows it at import time.
    def forward(self, x):
        # (B, ...) -> (B, -1)
        return x.view(x.size(0), -1)
class ChannelGateB(nn.Module):
    """BAM channel attention: global avg-pool -> bottleneck MLP -> per-channel map."""
    def __init__(self, gate_channel, reduction_ratio=16, num_layers=1):
        super().__init__()
        self.gate_c = nn.Sequential()
        self.gate_c.add_module('flatten', Flatten())
        # Layer widths: C -> C//r (x num_layers) -> C.
        gate_channels = [gate_channel]
        gate_channels += [gate_channel // reduction_ratio] * num_layers
        gate_channels += [gate_channel]
        for i in range(len(gate_channels) - 2):
            self.gate_c.add_module('gate_c_fc_%d'%i, nn.Linear(gate_channels[i], gate_channels[i+1]))
            self.gate_c.add_module('gate_c_bn_%d'%(i+1), nn.BatchNorm1d(gate_channels[i+1]))
            self.gate_c.add_module('gate_c_relu_%d'%(i+1), nn.ReLU())
        self.gate_c.add_module('gate_c_fc_final', nn.Linear(gate_channels[-2], gate_channels[-1]))
    def forward(self, in_tensor):
        # Pool to (B, C, 1, 1), gate, then broadcast back over the spatial dims.
        # NOTE(review): pooling uses size(2) for both kernel dims — assumes
        # square inputs; confirm with callers.
        avg_pool = F.avg_pool2d(in_tensor, in_tensor.size(2), stride=in_tensor.size(2))
        return self.gate_c(avg_pool).unsqueeze(2).unsqueeze(3).expand_as(in_tensor)
class ChannelGateC(nn.Module):
    """CBAM channel attention: pooled descriptors -> shared MLP -> sigmoid scale."""
    def __init__(self, gate_channels, reduction_ratio=16, pool_types=['avg', 'max']):
        # NOTE(review): pool_types uses a mutable default list; it is only
        # read here, but a tuple default would be safer.
        super().__init__()
        self.gate_channels = gate_channels
        # Bottleneck MLP shared by every pooling branch.
        self.mlp = nn.Sequential(
            Flatten(),
            nn.Linear(gate_channels, gate_channels // reduction_ratio),
            nn.ReLU(),
            nn.Linear(gate_channels // reduction_ratio, gate_channels)
            )
        self.pool_types = pool_types
    def forward(self, x):
        channel_att_sum = None
        for pool_type in self.pool_types:
            if pool_type=='avg':
                avg_pool = F.avg_pool2d( x, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
                channel_att_raw = self.mlp( avg_pool )
            elif pool_type=='max':
                max_pool = F.max_pool2d( x, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
                channel_att_raw = self.mlp( max_pool )
            elif pool_type=='lp':
                lp_pool = F.lp_pool2d( x, 2, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
                channel_att_raw = self.mlp( lp_pool )
            elif pool_type=='lse':
                # LSE pool only
                lse_pool = logsumexp_2d(x)
                channel_att_raw = self.mlp( lse_pool )
            # Sum attention logits across the pooling branches.
            if channel_att_sum is None:
                channel_att_sum = channel_att_raw
            else:
                channel_att_sum = channel_att_sum + channel_att_raw
        # Per-channel sigmoid gate, broadcast over the spatial dims.
        scale = sigmoid( channel_att_sum ).unsqueeze(2).unsqueeze(3).expand_as(x)
        return x * scale
class ChannelPool(nn.Module):
    """Stack per-pixel channel max and channel mean into a 2-channel map."""
    def forward(self, x):
        max_map = max(x, 1)[0].unsqueeze(1)
        mean_map = mean(x, 1).unsqueeze(1)
        return cat((max_map, mean_map), dim=1)
class SpatialGateC(nn.Module):
    """CBAM spatial attention: channel pool -> 7x7 conv -> sigmoid scale."""
    def __init__(self):
        super().__init__()
        kernel_size = 7
        self.compress = ChannelPool()
        # 2-channel (max+mean) map -> single attention map; relu disabled so
        # the following sigmoid sees raw logits.
        self.spatial = BasicConv(2, 1, kernel_size, stride=1, padding=(kernel_size-1) // 2, relu=False)
    def forward(self, x):
        x_compress = self.compress(x)
        x_out = self.spatial(x_compress)
        scale = sigmoid(x_out) # broadcasting
        return x * scale
class SpatialGateB(nn.Module):
    """BAM spatial attention: 1x1 reduce -> dilated 3x3 convs -> 1x1 to one map."""
    def __init__(self, gate_channel, reduction_ratio=16, dilation_conv_num=2, dilation_val=4):
        super().__init__()
        self.gate_s = nn.Sequential()
        # 1x1 channel reduction.
        self.gate_s.add_module('gate_s_conv_reduce0', nn.Conv2d(
            gate_channel, gate_channel//reduction_ratio, kernel_size=1))
        self.gate_s.add_module('gate_s_bn_reduce0', nn.BatchNorm2d(
            gate_channel//reduction_ratio))
        self.gate_s.add_module('gate_s_relu_reduce0',nn.ReLU())
        # Dilated convs enlarge the receptive field without shrinking the map
        # (padding == dilation keeps spatial size for 3x3 kernels).
        for i in range( dilation_conv_num ):
            self.gate_s.add_module('gate_s_conv_di_%d'%i, nn.Conv2d(
                gate_channel//reduction_ratio, gate_channel//reduction_ratio, kernel_size=3,\
                padding=dilation_val, dilation=dilation_val))
            self.gate_s.add_module('gate_s_bn_di_%d'%i, nn.BatchNorm2d(gate_channel//reduction_ratio))
            self.gate_s.add_module('gate_s_relu_di_%d'%i, nn.ReLU())
        # Collapse to a single-channel spatial attention map.
        self.gate_s.add_module('gate_s_conv_final', nn.Conv2d(gate_channel//reduction_ratio,
                                                              1, kernel_size=1))
    def forward(self, in_tensor):
        # Broadcast the 1-channel map back over all input channels.
        return self.gate_s(in_tensor).expand_as(in_tensor)
class BAM(nn.Module):
    """Bottleneck Attention Module: gate = 1 + sigmoid(channel * spatial)."""
    def __init__(self, gate_channel):
        super().__init__()
        self.channel_att = ChannelGateB(gate_channel)
        self.spatial_att = SpatialGateB(gate_channel)
    def forward(self,in_tensor):
        # Residual-style gate in (1, 2): features are amplified, never zeroed.
        att = 1 + sigmoid(self.channel_att(in_tensor) * self.spatial_att(in_tensor))
        return att * in_tensor
class CBAM(nn.Module):
    """Convolutional Block Attention Module: channel gate, then optional spatial gate."""
    def __init__(self, gate_channels, reduction_ratio=16, pool_types=['avg', 'max'], no_spatial=False):
        # NOTE(review): pool_types is a mutable default list; read-only here.
        super().__init__()
        self.ChannelGate = ChannelGateC(gate_channels, reduction_ratio, pool_types)
        self.no_spatial = no_spatial
        if not no_spatial:
            self.SpatialGate = SpatialGateC()
    def forward(self, x):
        x_out = self.ChannelGate(x)
        if not self.no_spatial:
            x_out = self.SpatialGate(x_out)
        return x_out
class CModel(nn.Module):
    """A base class for cosmosis models

    embeds = [('feature',n_vocab,len_vec,padding_idx,param.requires_grad),...]
        'feature' = name/key of feature to be embedded
        voc = vocabulary size (int)
        vec = length of the embedding vectors (int)
        padding_idx = False/int
        param.requires_grad = True/False
    """
    def __init__(self, embed_params=[]):  # embed_params is only read, never mutated
        super().__init__()
        print('CModel loaded...')
        #self.layers = nn.ModuleList(self.layers) #implement in the subclass
        self.embeddings = self.embedding_layer(embed_params)
        self.weight_init()

    def weight_init(self):
        """Kaiming-init convs, unit-init norm weights, zero-init biases."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)
                #nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.GroupNorm):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.InstanceNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def embedding_layer(self, embed_params):
        """Create one nn.Embedding per embed_params entry; None when empty.

        NOTE(review): embeddings are moved straight to 'cuda:0' — assumes a
        GPU is always present in this project's runtime; confirm.
        """
        if len(embed_params) == 0:
            return None
        embeddings = [nn.Embedding(voc, vec, padding_idx).to('cuda:0')
                      for _, voc, vec, padding_idx, _ in embed_params]
        for i, e in enumerate(embed_params):
            # honor the per-feature requires_grad flag (freeze or fine-tune)
            embeddings[i].weight.requires_grad = e[4]
        return embeddings

    def forward(self, X=None, embed_idx=[]):
        """check for categorical and/or continuous inputs, get the embeddings and
        concat as appropriate, feed to model.
        embed_idx = a list of index tensors, one per embedded feature, fed to
            the matching embedding layer
        X = torch tensor of concatenated continuous feature vectors
        """
        if len(embed_idx) > 0:
            embedded = []
            for e, idx in enumerate(embed_idx):
                out = self.embeddings[e](idx)
                embedded.append(flatten(out, start_dim=1))
            # Concatenate per-feature embeddings into one tensor. The original
            # referenced the undefined name `emb` here (NameError with more
            # than one feature) and, with a single feature and X=None, passed
            # a bare Python list on to the layers.
            embedded = cat(embedded, dim=1)
            if X is not None:
                X = cat([X, embedded], dim=1)
            else:
                X = embedded
        for l in self.layers:
            X = l(X)
        return X

    def adapt(self, shape):
        """for adapting a dataset shape[0] to a saved model shape[1]
        shape = (data_shape, model_shape)"""
        # freeze the layers
        for param in self.parameters():
            param.requires_grad = False
        # prepend a trainable adaptor layer; the original called the
        # non-existent self.ffunit — the method is named ff_unit.
        for l in self.ff_unit(shape[0], shape[1], 0.2)[::-1]:
            self.layers.insert(0, l)

    def ff_unit(self, D_in, D_out, dropout=False, activation=nn.SELU):
        """Linear -> optional activation -> BatchNorm1d -> optional Dropout."""
        ffu = []
        ffu.append(nn.Linear(D_in, D_out))
        if activation: ffu.append(activation())
        ffu.append(nn.BatchNorm1d(D_out))
        if dropout: ffu.append(nn.Dropout(dropout))
        return nn.Sequential(*ffu)

    def conv_unit(self, in_channels, out_channels, kernel_size=3,
                  stride=1, padding=1, dilation=1, groups=1, bias=False,
                  activation=None, cbam=False, dropout=False, pool=False):
        """Conv -> BN [-> activation] [-> MaxPool] -> Conv [-> CBAM] [-> Dropout]."""
        conv = []
        conv.append(nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
                              stride=stride, padding=padding, dilation=dilation, bias=bias))
        conv.append(nn.BatchNorm2d(out_channels))
        if activation: conv.append(activation())
        if pool: conv.append(nn.MaxPool2d(kernel_size=5, stride=2, padding=1))
        conv.append(nn.Conv2d(out_channels, out_channels, kernel_size=kernel_size,
                              stride=stride, padding=padding, dilation=dilation, bias=bias))
        if cbam: conv.append(CBAM(out_channels))
        if dropout: conv.append(nn.Dropout(p=dropout))
        return nn.Sequential(*conv)

    def res_connect(self, in_channels, out_channels, kernel_size=1, stride=1, dilation=1):
        """1x1 conv + BN shortcut used to match shapes for residual adds."""
        res = []
        res.append(nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
                             stride=stride, dilation=dilation))
        res.append(nn.BatchNorm2d(out_channels))
        return nn.Sequential(*res)
class ResBam(CModel):
    """ConvNet with options for residual connections and attention units and NeXt groupings
    ResNet https://arxiv.org/abs/1512.03385
    ResNeXt https://arxiv.org/abs/1611.05431
    CBAM https://arxiv.org/abs/1807.06521v2
    """
    def __init__(self, n_classes, in_channels, groups=1, residual=False, bam=False,
                 dropout=[False,False,False,False,False], act=nn.LeakyReLU):
        # dropout: one entry per conv stage plus one for the final fc.
        # NOTE(review): mutable default list; only read here.
        super().__init__()
        self.residual = residual
        self.bam = bam
        # Stem: 9x9/2 conv -> BN -> SELU -> 5x5/2 max pool.
        self.conv1 = nn.Conv2d(in_channels, 64, kernel_size=9, stride=2,
                               padding=5, dilation=1, bias=False)
        self.bn = nn.BatchNorm2d(64)
        self.maxpool = nn.MaxPool2d(kernel_size=5, stride=2, padding=1)
        self.activation = nn.SELU()
        # Four conv stages; each may get a 1x1 projection shortcut (res*)
        # for the residual add and a BAM attention block between stages.
        self.unit1 = self.conv_unit(64, 128, kernel_size=3, stride=1, groups=groups,
                                    activation=act, cbam=bam, dropout=dropout[0])
        if residual: self.res1 = self.res_connect(64, 128, kernel_size=1, stride=1, dilation=1)
        if bam: self.bam1 = BAM(128)
        self.unit2 = self.conv_unit(128, 256, kernel_size=3, stride=2, groups=groups,
                                    activation=act, cbam=bam, dropout=dropout[1])
        # NOTE(review): shortcut stride 4 vs two stride-2 convs in the unit —
        # assumes matching spatial sizes; confirm with the intended input size.
        if residual: self.res2 = self.res_connect(128, 256, kernel_size=1, stride=4, dilation=1)
        if bam: self.bam2 = BAM(256)
        self.unit3 = self.conv_unit(256, 512, kernel_size=3, stride=2, groups=groups,
                                    activation=act, cbam=bam, dropout=dropout[2])
        if residual: self.res3 = self.res_connect(256, 512, kernel_size=1, stride=4, dilation=1)
        if bam: self.bam3 = BAM(512)
        self.unit4 = self.conv_unit(512, 1024, kernel_size=3, stride=2, groups=groups,
                                    activation=None, cbam=False, dropout=dropout[3])
        if residual: self.res4 = self.res_connect(512, 1024, kernel_size=1, stride=4, dilation=1)
        self.avgpool = nn.AdaptiveAvgPool2d((1,1))
        self.fc = self.ff_unit(1024, n_classes, dropout=dropout[4], activation=None)
        self.weight_init()
        print('ResBam model loaded...')
    def forward(self, X):
        # Stem.
        X = self.conv1(X)
        X = self.bn(X)
        X = self.activation(X)
        X = self.maxpool(X)
        # Stage 1 (+ optional residual add and BAM gate).
        if self.residual: res = self.res1(X)
        X = self.unit1(X)
        if self.residual: X += res
        X = self.activation(X)
        if self.bam: X = self.bam1(X)
        # Stage 2.
        if self.residual: res = self.res2(X)
        X = self.unit2(X)
        if self.residual: X += res
        X = self.activation(X)
        if self.bam: X = self.bam2(X)
        # Stage 3.
        if self.residual: res = self.res3(X)
        X = self.unit3(X)
        if self.residual: X += res
        X = self.activation(X)
        if self.bam: X = self.bam3(X)
        # Stage 4 (no activation/attention inside the unit).
        if self.residual: res = self.res4(X)
        X = self.unit4(X)
        if self.residual: X += res
        X = self.activation(X)
        # Classifier head: global average pool -> flatten -> fc.
        X = self.avgpool(X)
        X = flatten(X, 1)
        X = self.fc(X)
        return X
class FFNet(CModel):
    """Configurable feed-forward net; hidden sizes are fractions of H.

    model_config[name]['shape'] lists (in_frac, out_frac) per unit, with
    'D_in'/'D_out' sentinels at the ends; 'dropout' holds one rate per unit.
    """
    model_config = {}
    model_config['simple'] = {'shape': [('D_in',1),(1,1),(1,1/2),(1/2,'D_out')],
                              'dropout': [.2, .3, .1]}
    model_config['funnel'] = {'shape': [('D_in',1),(1,1/2),(1/2,1/2),(1/2,1/4),(1/4,1/4),(1/4,'D_out')],
                              'dropout': [.1, .2, .3, .2, .1]}
    def __init__(self, model_name='funnel', D_in=0, H=0, D_out=0, embed_params=[]):
        super().__init__(embed_params)
        config = FFNet.model_config[model_name]
        layers = []
        # First unit consumes D_in and uses the first dropout rate.
        layers.append(self.ff_unit(D_in, int(config['shape'][0][1]*H),
                                   dropout=config['dropout'][0]))
        for i, s in enumerate(config['shape'][1:-1]):
            # i starts at 0 for the *second* unit, so its dropout entry is
            # i+1 — the original indexed with i, reusing dropout[0] and
            # never using the last configured rate.
            layers.append(self.ff_unit(int(s[0]*H), int(s[1]*H),
                                       dropout=config['dropout'][i+1]))
        # Plain linear output head (no activation/BN/dropout).
        layers.append([nn.Linear(int(config['shape'][-1][0]*H), D_out)])
        self.layers = [l for ffu in layers for l in ffu] # flatten
        self.layers = nn.ModuleList(self.layers)
        print('FFNet model loaded...')
|
icanswim/cosmosis | dataset.py | from abc import ABC, abstractmethod
import os, re, random, h5py, pickle
import pandas as pd
from pandas.api.types import CategoricalDtype
import numpy as np
from torch.utils.data import Dataset, ConcatDataset
from torch import as_tensor, squeeze
from torchvision import datasets as tvds
from sklearn import datasets as skds
from PIL import ImageFile, Image, ImageStat
ImageFile.LOAD_TRUNCATED_IMAGES = True
class CDataset(Dataset, ABC):
    """An abstract base class for cosmosis datasets
    features = ['data','keys']
    embed_lookup = {'label': index}
    ds_idx = list of indices or keys to be passed to the Sampler and Dataloader
    transform/target_transform = [Transformer_Class(),...]
    """
    def __init__ (self, features=[], targets=[], embeds=[], embed_lookup={},
                  transform=[], target_transform=[], pad=None, flatten=False, **kwargs):
        self.transform, self.target_transform = transform, target_transform
        self.embeds, self.embed_lookup = embeds, embed_lookup
        self.features, self.targets = features, targets
        self.pad, self.flatten = pad, flatten
        # kwargs are forwarded to the subclass load_data implementation.
        self.ds = self.load_data(**kwargs)
        try:
            self.ds_idx = list(self.ds.keys())
        except:
            # best-effort: load_data may return a non-dict (e.g. a torchvision
            # dataset); subclasses then set ds_idx themselves
            pass
        print('CDataset created...')
    def __getitem__(self, i):
        """Return (X, embed_idx, y) for key/index i, applying transforms."""
        X, embed_idx, y = [], [], []
        if len(self.features) > 0:
            X = self._get_features(self.ds[i], self.features)
            for transform in self.transform:
                X = transform(X)
        if len(self.embeds) > 0:
            embed_idx = self._get_embed_idx(self.ds[i], self.embeds, self.embed_lookup)
        if len(self.targets) > 0:
            y = self._get_features(self.ds[i], self.targets)
            for transform in self.target_transform:
                y = transform(y)
        return X, embed_idx, y
    def __iter__(self):
        # Iterate in ds_idx order (which a Sampler may have shuffled/split).
        for i in self.ds_idx:
            yield self.__getitem__(i)
    def __len__(self):
        return len(self.ds_idx)
    def _get_features(self, datadic, features):
        """Concatenate the named features into one 1D-compatible array."""
        data = []
        for f in features:
            out = datadic[f]
            if self.pad is not None:
                # right-pad each feature up to self.pad entries
                out = np.pad(out, (0, (self.pad - out.shape[0])))
            if self.flatten:
                out = np.reshape(out, -1)
            data.append(out)
        return np.concatenate(data)
    def _get_embed_idx(self, datadic, embeds, embed_lookup):
        """Map each embed feature's labels to int64 index arrays via embed_lookup."""
        embed_idx = []
        for e in embeds:
            out = datadic[e]
            idx = []
            if self.pad is not None:
                out = np.pad(out, (0, (self.pad - out.shape[0])))
            for i in np.reshape(out, -1).tolist():
                idx.append(np.reshape(np.asarray(embed_lookup[i]), -1).astype('int64'))
            embed_idx.append(np.concatenate(idx))
        return embed_idx
    @abstractmethod
    def load_data(self):
        # Example implementation: return a dict keyed by sample id, each value
        # a dict of named features; set self.embed_lookup for label features.
        datadic = {1: {'feature_1': np.asarray([.04]),
                       'feature_2': np.asarray([.02]),
                       'feature_3': ['b'],
                       'feature_4': ['c','c','d'],
                       'feature_5': np.asarray([1.1])},
                   2: {'feature_1': np.asarray([.03]),
                       'feature_2': np.asarray([.01]),
                       'feature_3': ['a'],
                       'feature_4': ['d','d','d'],
                       'feature_5': np.asarray([1.2])}}
        self.embed_lookup = {'a': 1,'b': 2,'c': 3,'d': 4}
        return datadic
class ImStat(ImageStat.Stat):
    """A class for calculating a PIL image mean and std dev"""
    def __add__(self, other):
        # Combine two stats by summing their histograms bin-wise, so stats
        # can be accumulated over a whole dataset.
        return ImStat(list(map(np.add, self.h, other.h)))
class ImageDatasetStats():
    """A class for calculating an image dataset's mean and std dev.

    dataset: iterable of (PIL image, ...) tuples with a ds_idx attribute.
    The accumulated ImStat is left on self.stats.
    """
    def __init__(self, dataset):
        self.stats = None
        i = 1
        print('images to process: {}'.format(len(dataset.ds_idx)))
        for image in dataset:
            if self.stats is None:  # identity test; the original used `== None`
                self.stats = ImStat(image[0])
            else:
                self.stats += ImStat(image[0])
            i += 1
            if i % 10000 == 0:
                print('images processed: {}'.format(i))
        if self.stats is None:
            # robustness: an empty dataset previously crashed on None.mean
            print('no images processed...')
        else:
            print('mean: {}, stddev: {}'.format(self.stats.mean, self.stats.stddev))
class LoadImage():
    """A transformer for use with image file based datasets
    transforms (loads) an image filename into a PIL image"""
    def __call__(self, filename):
        # PIL opens lazily; the file handle is closed when the image data is
        # loaded or the object is garbage collected.
        return Image.open(filename)
class AsTensor():
    """Convert a numpy array (or other array-like) to a torch tensor."""
    def __call__(self, array):
        tensor = as_tensor(array)
        return tensor
class Transpose():
    """Reverse the axis order of a numpy array (full transpose)."""
    def __call__(self, array):
        transposed = np.transpose(array)
        return transposed
class Squeeze():
    """Drop every size-1 dimension from a torch tensor."""
    def __call__(self, tensor):
        return squeeze(tensor)
class DType():
    """Cast a numpy array to the dtype configured at construction."""
    def __init__(self, datatype):
        self.datatype = datatype
    def __call__(self, array):
        return array.astype(self.datatype)
class TVDS(CDataset):
    """A wrapper for torchvision.datasets
    dataset = torchvision datasets class name str ('FakeData')
    tv_params = dict of torchvision.dataset parameters ({'size': 1000})
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # torchvision datasets are positionally indexed, so a plain range
        # replaces the dict keys the base class tried (and failed) to read.
        self.ds_idx = list(range(len(self.ds)))
        print('TVDS created...')
    def __getitem__(self, i):
        # Returns (sample, [], label); the embed slot is unused here.
        X = self.ds[i][0]
        #X = np.reshape(np.asarray(self.ds[i][0]), -1).astype(np.float32)
        y = self.ds[i][1]
        #y = np.squeeze(np.asarray(self.ds[i][1]).astype(np.int64))
        return X, [], y
    def load_data(self, dataset, tv_params):
        """Instantiate the named torchvision dataset with tv_params."""
        ds = getattr(tvds, dataset)(**tv_params)
        return ds
class SKDS(CDataset):
    """A wrapper for sklearn.datasets
    https://scikit-learn.org/stable/datasets/sample_generators.html
    make = sklearn datasets method name str ('make_regression')
    sk_params = dict of sklearn.datasets parameters ({'n_samples': 100})
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        print('SKDS {} created...'.format(kwargs['make']))
    def load_data(self, make, sk_params, features_dtype, targets_dtype):
        """Generate a sklearn dataset and repackage it as {key: feature-dict}."""
        ds = getattr(skds, make)(**sk_params)
        datadic = {}
        for i in range(len(ds[0])):
            # index with i — the original used i-1, which mapped key 0 to the
            # *last* sample (X/y stayed paired, but the shift was accidental)
            datadic[i] = {'X': np.reshape(ds[0][i], -1).astype(features_dtype),
                          'y': np.reshape(ds[1][i], -1).astype(targets_dtype),
                          'embeds': None}
        return datadic
|
icanswim/cosmosis | learning.py | <gh_stars>0
from datetime import datetime
import logging
import random
import os
os.environ['NUMEXPR_MAX_THREADS'] = '16'
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from torch import no_grad, save, load, from_numpy, as_tensor, squeeze
from torch.utils.data import Sampler, DataLoader
from torch.nn.functional import softmax
from sklearn import metrics
class Metrics():
    """Collects train/val/test losses, lr history and an optional sklearn
    metric; logs to ./logs/cosmosis.log and saves per-epoch history to csv."""
    #TODO checkpointing and early stopping
    def __init__(self, report_interval=10, sk_metric_name=None, sk_params={}):
        self.start = datetime.now()
        self.report_time = self.start
        self.report_interval = report_interval  # seconds between status prints
        self.epoch, self.e_loss, self.predictions = 0, [], []
        self.train_loss, self.val_loss, self.lr_log = [], [], []
        self.sk_metric_name, self.sk_params = sk_metric_name, sk_params
        self.skm, self.sk_train_log, self.sk_val_log = None, [], []
        self.sk_y, self.sk_pred = [], []
        if self.sk_metric_name is not None:
            # resolve the sklearn.metrics callable by name
            self.skm = getattr(metrics, self.sk_metric_name)
        logging.basicConfig(filename='./logs/cosmosis.log', level=20)
        self.log('\nNew Experiment: {}'.format(self.start))
    def infer(self):
        """Flatten accumulated predictions to (id, prediction) rows and save csv."""
        self.predictions = np.concatenate(self.predictions, axis=0)
        self.predictions = np.reshape(self.predictions, (-1, 2))
        self.predictions = pd.DataFrame(self.predictions, columns=['id','predictions'])
        self.predictions['id'] = self.predictions['id'].astype('int64')
        print('self.predictions.iloc[:10]', self.predictions.shape, self.predictions.iloc[:10])
        self.predictions.to_csv('cosmosis_inference.csv',
                                header=['id','predictions'],
                                index=False)
        print('inference complete and saved to csv...')
    def sk_metric(self, flag):
        """Score the accumulated sk_y/sk_pred buffers and append to the logs."""
        if self.skm is not None:
            def softmax(x):
                return np.exp(x)/sum(np.exp(x))
            y = np.reshape(np.vstack(np.asarray(self.sk_y, 'float64')), -1)
            y_pred = np.vstack(np.asarray(self.sk_pred, 'float64'))
            if self.sk_metric_name == 'roc_auc_score':
                # roc_auc_score expects probabilities, not raw logits
                y_pred = np.apply_along_axis(softmax, 1, y_pred)
            score = self.skm(y, y_pred, **self.sk_params)
            if flag == 'train':
                self.sk_train_log.append(score)
            else:
                self.sk_val_log.append(score)
            # reset the accumulation buffers for the next epoch
            self.sk_y, self.sk_pred = [], []
        else:
            # keep the metric logs aligned with the loss logs
            self.sk_train_log.append(0)
            self.sk_val_log.append(0)
    def loss(self, flag, loss):
        """Record a loss value under 'train', 'val' or 'test'."""
        if flag == 'train':
            self.train_loss.append(loss)
        if flag == 'val':
            self.val_loss.append(loss)
        if flag == 'test':
            self.log('test loss: {}'.format(loss))
            print('test loss: {}'.format(loss))
    def log(self, message):
        logging.info(message)
    def status_report(self):
        """Print progress when report_interval elapsed or every 10th epoch."""
        elapsed = datetime.now() - self.report_time
        if elapsed.total_seconds() > self.report_interval or self.epoch % 10 == 0:
            print('learning time: {}'.format(datetime.now()-self.start))
            print('epoch: {}, lr: {}'.format(self.epoch, self.lr_log[-1]))
            print('train loss: {}, val loss: {}'.format(self.train_loss[-1], self.val_loss[-1]))
            print('sklearn train metric: {}, sklearn validation metric: {}'.format(
                self.sk_train_log[-1], self.sk_val_log[-1]))
            self.report_time = datetime.now()
    def report(self):
        """Log final results and save the per-epoch history to a timestamped csv."""
        elapsed = datetime.now() - self.start
        self.log('learning time: {} \n'.format(elapsed))
        print('learning time: {}'.format(elapsed))
        self.log('sklearn test metric: \n{} \n'.format(self.sk_val_log[-1]))
        print('sklearn test metric: \n{} \n'.format(self.sk_val_log[-1]))
        pd.DataFrame(zip(self.train_loss, self.val_loss, self.lr_log, self.sk_val_log),
                     columns=['train_loss','val_loss','lr','sk_metric']).to_csv(
                         './logs/'+self.start.strftime("%Y%m%d_%H%M"))
        self.view_log('./logs/'+self.start.strftime('%Y%m%d_%H%M'))
    @classmethod
    def view_log(cls, log_file):
        """Plot the loss/lr/metric columns of a saved log csv."""
        log = pd.read_csv(log_file)
        log.iloc[:,1:5].plot(logy=True)
        plt.show()
class Selector(Sampler):
    """Index sampler serving train/val/test/infer views of one index list.

    splits = (train_split,) remainder is val_split or
             (train_split,val_split) remainder is test_split or
             None/() to keep the explicitly supplied train/val/test indices
    set_seed = False, or an int seed for a reproducible shuffle
    subset = False, or a fraction (0-1] of the dataset to keep
    Call the instance with a flag ('train'/'val'/'test'/'infer') to select
    which index list __iter__/__len__ serve.
    """
    def __init__(self, dataset_idx=None, train_idx=None, val_idx=None, test_idx=None,
                 splits=(.7,.15), set_seed=False, subset=False):
        self.set_seed = set_seed
        # identity comparison (the original used `== None`)
        if dataset_idx is None:
            self.dataset_idx = train_idx
        else:
            self.dataset_idx = dataset_idx
        self.train_idx, self.val_idx, self.test_idx = train_idx, val_idx, test_idx
        if set_seed:
            random.seed(set_seed)
            random.shuffle(self.dataset_idx)
        if subset:
            sub = int(len(self.dataset_idx)*subset)
            self.dataset_idx = self.dataset_idx[:sub]
        if splits:  # guard: splits=None previously crashed on len(None)
            if len(splits) == 1:
                cut1 = int(len(self.dataset_idx)*splits[0])
                self.train_idx = self.dataset_idx[:cut1]
                self.val_idx = self.dataset_idx[cut1:]
            if len(splits) == 2:
                cut1 = int(len(self.dataset_idx)*splits[0])
                cut2 = int(len(self.dataset_idx)*splits[1])
                self.train_idx = self.dataset_idx[:cut1]
                self.val_idx = self.dataset_idx[cut1:cut1+cut2]
                self.test_idx = self.dataset_idx[cut1+cut2:]
        random.seed()  # restore an unseeded RNG for the rest of the program
    def __iter__(self):
        if self.flag == 'train':
            return iter(self.train_idx)
        if self.flag == 'val':
            return iter(self.val_idx)
        if self.flag == 'test':
            return iter(self.test_idx)
        if self.flag == 'infer':
            return iter(self.dataset_idx)
    def __len__(self):
        if self.flag == 'train':
            return len(self.train_idx)
        if self.flag == 'val':
            return len(self.val_idx)
        if self.flag == 'test':
            return len(self.test_idx)
        if self.flag == 'infer':
            return len(self.dataset_idx)
    def __call__(self, flag):
        """Select which split subsequent iteration/len calls refer to."""
        self.flag = flag
        return self
    def shuffle_train_val_idx(self):
        """Reshuffle train and val index lists (reproducibly if set_seed)."""
        if self.set_seed:
            random.seed(self.set_seed)
        random.shuffle(self.val_idx)
        random.shuffle(self.train_idx)
        random.seed()
class Learn():
    """
    Orchestrates training/validation/testing (or inference) of a model.

    Datasets = [TrainDS, ValDS, TestDS]
    if 1 DS is given it is split into train/val/test using splits param
    if 2 DS are given first one is train/val second is test
    if 3 DS are given first is train second is val third is test
    Criterion = None implies inference mode
    TODO: early stopping/checkpoints
    load_model = False/'saved_model.pth'/'saved_model.pk'
    squeeze_y = True/False (torch.squeeze(y))
    """
    # NOTE(review): the dict defaults below are mutable default arguments;
    # they appear to be read-only here, but None-sentinels would be safer.
    def __init__(self, Datasets, Model, Sampler=Selector, Metrics=Metrics,
                 Optimizer=None, Scheduler=None, Criterion=None,
                 ds_params={}, model_params={}, sample_params={},
                 opt_params={}, sched_params={}, crit_params={}, metrics_params={},
                 adapt=False, load_model=False, load_embed=False, save_model=False,
                 batch_size=10, epochs=1, squeeze_y=False):
        self.bs = batch_size
        self.squeeze_y = squeeze_y
        self.ds_params = ds_params
        # Builds train/val/test datasets and the index sampler.
        self.dataset_manager(Datasets, Sampler, ds_params, sample_params)
        self.metrics = Metrics(**metrics_params)
        # Record the full experiment configuration up front.
        self.metrics.log('model: {}\n{}'.format(Model, model_params))
        self.metrics.log('dataset: {}\n{}'.format(Datasets, ds_params))
        self.metrics.log('sampler: {}\n{}'.format(Sampler, sample_params))
        self.metrics.log('epochs: {}, batch_size: {}, save_model: {}, load_model: {}'.format(
                                    epochs, batch_size, save_model, load_model))
        if load_model:
            try: #uses the same embed params for all datasets (train/val/test)
                # Preferred path: fresh model + saved state_dict.
                model = Model(**model_params)
                model.load_state_dict(load('./models/'+load_model))
                print('model loaded from state_dict...')
            except:
                # Fallback: the file is a whole pickled model.
                model = load('./models/'+load_model)
                print('model loaded from pickle...')
        else:
            model = Model(**model_params)
            if load_embed:
                for i, embedding in enumerate(model.embeddings):
                    try:
                        weight = np.load('./models/{}_{}_embedding_weight.npy'.format(
                                                                    load_embed, i))
                        # NOTE(review): from_pretrained is a factory that
                        # returns a NEW embedding; its result is discarded
                        # here, so the loaded weights may not actually be
                        # installed on `model` -- confirm.
                        embedding.from_pretrained(from_numpy(weight),
                                        freeze=model_params['embeds'][i][4])
                        print('loading embedding weights...')
                    except:
                        print('no embedding weights found. initializing... ')
        if adapt:
            model.adapt(adapt)
        self.model = model.to('cuda:0')
        # NOTE(review): logs the bound method object, not its output.
        self.metrics.log(self.model.children)
        if Criterion:
            # Training mode: build criterion/optimizer/scheduler, then loop.
            self.criterion = Criterion(**crit_params).to('cuda:0')
            self.metrics.log('criterion: {}\n{}'.format(self.criterion, crit_params))
            self.opt = Optimizer(self.model.parameters(), **opt_params)
            self.metrics.log('optimizer: {}\n{}'.format(self.opt, opt_params))
            self.scheduler = Scheduler(self.opt, **sched_params)
            self.metrics.log('scheduler: {}\n{}'.format(self.scheduler, sched_params))
            for e in range(epochs):
                self.metrics.epoch = e
                self.sampler.shuffle_train_val_idx()
                self.run('train')
                with no_grad():
                    self.run('val')
            # Single test pass after all epochs complete.
            with no_grad():
                self.run('test')
        else: # no Criterion implies inference mode
            with no_grad():
                self.run('infer')
        if save_model:
            # Adapted models are pickled whole; otherwise just the state_dict.
            if adapt: save(self.model, './models/{}.pth'.format(
                            self.metrics.start.strftime("%Y%m%d_%H%M")))
            if not adapt: save(self.model.state_dict(), './models/{}.pth'.format(
                            self.metrics.start.strftime("%Y%m%d_%H%M")))
            if hasattr(self.model, 'embeddings'):
                # Persist each embedding table separately as a numpy array.
                for i, embedding in enumerate(self.model.embeddings):
                    weight = embedding.weight.detach().cpu().numpy()
                    np.save('./models/{}_{}_embedding_weight.npy'.format(
                        self.metrics.start.strftime("%Y%m%d_%H%M"), i), weight)
        self.metrics.report()
    def run(self, flag):
        """One full pass over the split selected by `flag`
        ('train'/'val'/'test'/'infer'): forward, loss, metrics, and (for
        'train') backprop."""
        # e_loss: running epoch loss, i: samples seen.
        e_loss, e_sk, i = 0, 0, 0
        # NOTE(review): sets the .training attribute directly instead of
        # model.train()/model.eval(), so submodules (dropout/batchnorm)
        # may not switch modes -- confirm intent.
        if flag == 'train':
            self.model.training = True
            dataset = self.train_ds
            drop_last = True
        if flag == 'val':
            self.model.training = False
            dataset = self.val_ds
            drop_last = True
        if flag == 'test':
            self.model.training = False
            dataset = self.test_ds
            drop_last = True
        if flag == 'infer':
            self.model.training = False
            dataset = self.test_ds
            drop_last = False  # keep every sample when predicting
        dataloader = DataLoader(dataset, batch_size=self.bs,
                                sampler=self.sampler(flag=flag),
                                num_workers=8, pin_memory=True,
                                drop_last=drop_last)
        def to_cuda(data):
            # Empty containers pass through as None (dataset may omit parts).
            if len(data) == 0: return None
            else: return data.to('cuda:0', non_blocking=True)
        for X, embeds, y in dataloader:
            i += self.bs
            X = to_cuda(as_tensor(X))
            if len(embeds) > 0:
                embeds = [to_cuda(as_tensor(emb)) for emb in embeds]
                y_pred = self.model(X, embeds)
            else:
                y_pred = self.model(X)
            if flag == 'infer':
                # NOTE(review): assumes y_pred/y are CPU arrays here --
                # confirm, since the model runs on cuda:0.
                self.metrics.predictions.append(np.concatenate((y_pred, y), axis=1))
            else:
                y = to_cuda(as_tensor(y))
                if self.squeeze_y:
                    y = squeeze(y)
                # zero_grad also runs for val/test; harmless without backward.
                self.opt.zero_grad()
                b_loss = self.criterion(y_pred, y)
                e_loss += b_loss.item()
                if self.metrics.skm is not None:
                    self.metrics.sk_y.append(y.detach().cpu().numpy())
                    self.metrics.sk_pred.append(y_pred.detach().cpu().numpy())
                if flag == 'train':
                    b_loss.backward()
                    self.opt.step()
        if flag == 'infer':
            self.metrics.infer()
        else:
            # Per-sample mean loss for the epoch.
            self.metrics.loss(flag, e_loss/i)
            self.metrics.sk_metric(flag)
            if flag == 'val':
                # Plateau-style scheduler steps on validation loss.
                self.scheduler.step(e_loss/i)
                self.metrics.lr_log.append(self.opt.param_groups[0]['lr'])
            self.metrics.status_report()
    def dataset_manager(self, Datasets, Sampler, ds_params, sample_params):
        """Instantiate datasets and the sampler per the 1/2/3-dataset
        convention documented on the class."""
        if len(Datasets) == 1:
            # Single dataset: sampler splits its indices into all three sets.
            self.train_ds = Datasets[0](**ds_params['train_params'])
            self.val_ds = self.test_ds = self.train_ds
            self.sampler = Sampler(dataset_idx=self.train_ds.ds_idx,
                                   **sample_params)
        if len(Datasets) == 2:
            # First dataset holds train+val indices; second is test.
            self.train_ds = Datasets[0](**ds_params['train_params'])
            self.val_ds = self.train_ds
            self.test_ds = Datasets[1](**ds_params['test_params'])
            self.sampler = Sampler(train_idx=self.train_ds.ds_idx,
                                   test_idx=self.test_ds.ds_idx,
                                   **sample_params)
        if len(Datasets) == 3:
            # Fully explicit train/val/test datasets.
            self.train_ds = Datasets[0](**ds_params['train_params'])
            self.val_ds = Datasets[1](**ds_params['val_params'])
            self.test_ds = Datasets[2](**ds_params['test_params'])
            self.sampler = Sampler(train_idx=self.train_ds.ds_idx,
                                   val_idx=self.val_ds.ds_idx,
                                   test_idx=self.test_ds.ds_idx,
                                   **sample_params)
|
full-stakk/flask-rest | app.py | """simple flask rest api."""
from flask import Flask, g, jsonify
from flask import render_template
from auth import auth
import models
import config
from resources.users import users_api
from resources.articles import articles_api
app = Flask(__name__)
app.register_blueprint(users_api, url_prefix='/api/v1')
app.register_blueprint(articles_api, url_prefix='/api/v1')
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def index(path):
    """Catch-all route: serve the app shell for any non-API path.

    `path` is the matched URL remainder; it is ignored because the same
    index.html is returned for every path.
    """
    return render_template('index.html')
@app.route('/api/v1/users/token', methods=['GET'])
@auth.login_required
def get_auth_token():
    """Issue a timed auth token for the authenticated user.

    `g.user` is set by the auth callbacks; the token bytes are decoded to
    ASCII for the JSON payload.
    """
    token = g.user.generate_auth_token()
    return jsonify({'token': token.decode('ascii')})
"""Start server if app.py is run directly"""
if __name__ == '__main__':
models.initialize()
app.run(debug=config.DEBUG, port=config.PORT, host=config.HOST)
|
full-stakk/flask-rest | resources/users.py | """This module handles calls to the database based on URIs it recieves."""
from flask.ext.restful import (Resource, Api, fields, marshal, marshal_with,
reqparse, abort)
from flask import jsonify, Blueprint, make_response, g
from auth import auth
import models
import json
# Response definitions
user_fields = {
'name': fields.String,
'email': fields.String,
'password': fields.String,
'created_at': fields.String
}
class UserList(Resource):
    """Read-only collection endpoint listing all users."""

    def get(self):
        """Return name/email for every user, with a 201 status."""
        summary_fields = {'name': fields.String, 'email': fields.String}
        users = []
        for user in models.User.select():
            users.append(marshal(user, summary_fields))
        return (users, 201, {
            'message': 'Found Users'
        })
class User(Resource):
    """Single-user endpoint: operations on the authenticated account."""

    @marshal_with(user_fields)
    @auth.login_required
    def get(self):
        """Return the authenticated user's record (201 + message header)."""
        try:
            user = models.User.select().where(
                models.User.email == g.user.email
            ).get()
        # Bug fix: this queries User, so the miss is User.DoesNotExist.
        # Catching Article.DoesNotExist let a missing user escape as a 500.
        except models.User.DoesNotExist:
            return make_response(json.dumps(
                {'error': 'That user does not exist'}
            ), 403)
        return (user, 201, {
            'message': 'Found User'
        })

    def put(self):
        """update a user (not implemented yet)."""
        return jsonify({'user': 'Unimplemented Method'})

    @auth.login_required
    def delete(self):
        """Delete the authenticated user's row."""
        try:
            user = models.User.select().where(
                models.User.email == g.user.email
            ).get()
        # Bug fix: same Article/User exception mix-up as in get().
        except models.User.DoesNotExist:
            return make_response(json.dumps(
                {'error': 'That user does not exist'}
            ), 403)
        # Bug fix: `user.delete()` builds peewee's class-level DELETE with
        # no WHERE clause, which would wipe every user. delete_instance()
        # removes only this row.
        user.delete_instance()
        return ('Deleted', 204)

    def post(self):
        """Create a user from name/email/password args; reports duplicates."""
        parser = reqparse.RequestParser()
        parser.add_argument('name', type=str, help='name is required', required=True)
        parser.add_argument('email', type=str, help='email is required', required=True)
        parser.add_argument('password', type=str, help='password is required', required=True)
        args = parser.parse_args()
        # create_user returns None when the email is already registered.
        user = models.User.create_user(**args)
        if user is None:
            return jsonify({
                'user': 'Empty',
                'message': 'Email already exists'
            })
        else:
            return jsonify({
                'user': marshal(user, user_fields),
                'message': 'User created'
            })
"""
Proxy to Blueprint module
arg 1 -- the location of the resource resources/users
arg 2 -- the namespace of the resources
"""
users_api = Blueprint('resources.users', __name__)
api = Api(users_api)
"""
Add resource logic to api routes
arg 1 -- resource to use
arg 2 -- the URI to use
arg 3 -- the name of the endpoint
"""
api.add_resource(
UserList,
'/users',
endpoint='users'
)
api.add_resource(
User,
'/user',
endpoint='user'
)
|
full-stakk/flask-rest | config.py | <filename>config.py
"""Configurations for the app."""
DEBUG = True
HOST = '0.0.0.0'
PORT = 8000
SECRET = '<KEY>'
|
full-stakk/flask-rest | auth.py | from flask import g
from flask.ext.httpauth import HTTPBasicAuth, HTTPTokenAuth, MultiAuth
import models
basic_auth = HTTPBasicAuth()
token_auth = HTTPTokenAuth(scheme='Token')
auth = MultiAuth(token_auth, basic_auth)
@basic_auth.verify_password
def verify_password(email, password):
    """Basic-auth callback: check credentials and stash the user on flask.g."""
    try:
        user = models.User.get(models.User.email == email)
    except models.User.DoesNotExist:
        # Unknown email: reject without revealing which part failed.
        return False
    if not user.verify_password(password):
        return False
    g.user = user
    return True
@token_auth.verify_token
def verify_token(token):
    """Token-auth callback: resolve the token to a user and stash on flask.g."""
    user = models.User.verify_auth_token(token)
    if user is None:
        return False
    g.user = user
    return True
|
full-stakk/flask-rest | models.py | """Database models."""
import datetime
import bcrypt
import config
from peewee import *
from itsdangerous import (TimedJSONWebSignatureSerializer as Serializer,
BadSignature, SignatureExpired)
DATABASE = SqliteDatabase('app.db')
class BaseModel(Model):
    """Specifies database connection.

    All models inherit from this so they share the module-level
    SqliteDatabase handle.
    """
    class Meta:
        database = DATABASE
class User(BaseModel):
    """A user of the app."""
    name = CharField()
    email = CharField(unique=True)  # natural login key, stored lower-cased
    password = CharField()  # bcrypt hash (see set_password)
    created_at = DateTimeField(default=datetime.datetime.now)
    @classmethod
    def create_user(cls, email, password, name):
        """Create a user unless the (lower-cased) email is taken.

        Returns the new user, or None when the email already exists.
        """
        email = email.lower()
        try:
            cls.select().where(cls.email == email).get()
        except cls.DoesNotExist:
            user = cls(email=email, name=name)
            user.password = user.set_password(password)
            user.save()
            return user
        else:
            # Lookup succeeded -> email already registered.
            return None
    @staticmethod
    def verify_auth_token(token):
        """Return the user encoded in a signed token, or None if invalid/expired."""
        serializer = Serializer(config.SECRET)
        try:
            data = serializer.loads(token)
        except (SignatureExpired, BadSignature):
            return None
        else:
            user = User.get(User.id == data['id'])
            return user
    @staticmethod
    def set_password(password):
        """Hash a plaintext password with bcrypt.

        NOTE(review): returns bytes that get stored in a CharField; this
        matches Python 2 str semantics -- confirm behavior on Python 3.
        """
        return bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt())
    def verify_password(self, password):
        """Check a plaintext password against the stored bcrypt hash.

        Re-hashing with the stored hash as salt reproduces the hash iff
        the password matches.
        """
        hashed = bcrypt.hashpw(password.encode('utf-8'), self.password.encode('utf-8'))
        return hashed == self.password.encode('utf-8')
    def generate_auth_token(self, expires=3600):
        """Return a signed token embedding this user's id (default 1h expiry)."""
        serializer = Serializer(config.SECRET, expires_in=expires)
        return serializer.dumps({'id': self.id})
class Article(BaseModel):
    """A single article written by a user."""
    title = CharField()
    body = CharField()
    user = ForeignKeyField(User, related_name='articles')  # author backref
    created_at = DateTimeField(default=datetime.datetime.now)
def initialize():
    """Initialize database connection and create models.

    Safe to call repeatedly: `safe=True` skips tables that already exist.
    The connection is closed again; request handlers reopen as needed.
    """
    DATABASE.connect()
    DATABASE.create_tables([User, Article], safe=True)
    DATABASE.close()
|
full-stakk/flask-rest | resources/articles.py | """This module handles calls to the database based on URIs it recieves."""
from flask.ext.restful import (Resource, Api, fields, marshal_with, marshal,
reqparse, abort)
from flask import jsonify, Blueprint, make_response, g
from auth import auth
import json
import models
import datetime
# Response definitions
article_fields = {
'id': fields.Integer,
'title': fields.String,
'body': fields.String,
'created_at': fields.DateTime
}
def article_or_404(id):
    """Fetch an Article by primary key or abort the request with a 404."""
    query = models.Article.select().where(models.Article.id == id)
    try:
        return query.get()
    except models.Article.DoesNotExist:
        # abort() raises, so this never falls through.
        abort(404, message="Article does not exist.")
class ArticleList(Resource):
    """Returns a list of articles."""
    def get(self):
        """Return articles for one author, or all when email == 'all'.

        NOTE(review): an unknown email makes .get() raise an unhandled
        User.DoesNotExist (HTTP 500) -- a 404 would be friendlier.
        """
        parser = reqparse.RequestParser()
        parser.add_argument('email', type=str, help="Email is required", required=True)
        args = parser.parse_args()
        if(args['email'] == 'all'):
            articles = [marshal(article, article_fields) for article in models.Article.select()]
        else:
            user = models.User.select().where(models.User.email == args['email']).get()
            articles = [marshal(article, article_fields) for article in models.Article.select().where(models.Article.user == user)]
        return articles
    @auth.login_required
    @marshal_with(article_fields)
    def post(self):
        """Create an article owned by the authenticated user (g.user)."""
        parser = reqparse.RequestParser()
        parser.add_argument('email', type=str, help="Email is required", required=True)
        parser.add_argument('title', type=str, help="Title is required", required=True)
        parser.add_argument('body', type=str, help="Article is required", required=True)
        args = parser.parse_args()
        article = models.Article.create(
            title=args['title'],
            body=args['body'],
            user=g.user,
            created_at=datetime.datetime.now()
        )
        # 201 Created, with the new id echoed in a header.
        return (article, 201, {
            'id': article.id
        })
class Article(Resource):
    """Handles article methods.

    NOTE(review): put() and delete() take an `id` URL parameter, but the
    resource is registered at '/article' with no id converter, so Flask
    cannot supply it -- these routes look unreachable as wired. Confirm.
    """
    @marshal_with(article_fields)
    def get(self):
        """Return one article looked up by the required `id` query arg."""
        parser = reqparse.RequestParser()
        parser.add_argument('id', type=int, help="Id is required", required=True)
        args = parser.parse_args()
        return article_or_404(args['id'])
    @auth.login_required
    def put(self, id):
        """Update an article owned by the authenticated user.

        NOTE(review): `article.update(**args)` is peewee's class-level
        UPDATE with no WHERE clause and also rewrites the id column --
        looks like it would touch every row; confirm before relying on it.
        """
        parser = reqparse.RequestParser()
        parser.add_argument('id', type=int, help="Id is required", required=True)
        args = parser.parse_args()
        try:
            article = models.Article.select().where(
                models.Article.user == g.user,
                models.Article.id == id
            ).get()
        except models.Article.DoesNotExist:
            return make_response(json.dumps(
                {'error': 'That article does not exist'}
            ), 403)
        query = article.update(**args)
        query.execute()
        return (
            models.Article.select().where(models.Article.id == args['id']),
            200,
            {'id': id}
        )
    @auth.login_required
    def delete(self, id):
        """Delete an article owned by the authenticated user.

        NOTE(review): `article.delete()` is peewee's class-level DELETE
        without a WHERE clause (delete_instance() targets one row) --
        confirm this does not wipe the table.
        """
        try:
            article = models.Article.select().where(
                models.Article.user == g.user,
                models.Article.id == id
            ).get()
        except models.Article.DoesNotExist:
            return make_response(json.dumps(
                {'error': 'That article does not exist'}
            ), 403)
        query = article.delete()
        query.execute()
        return article.id, 204, {'message': 'Deleted'}
"""
Proxy to Blueprint module
arg 1 -- the location of the resource resources/users
arg 2 -- the namespace of the resources
"""
articles_api = Blueprint('resources.articles', __name__)
api = Api(articles_api)
"""
Add resource logic to api routes
arg 1 -- resource to use
arg 2 -- the URI to use
arg 3 -- the name of the endpoint
"""
api.add_resource(
ArticleList,
'/articles',
endpoint='articles'
)
api.add_resource(
Article,
'/article',
endpoint='article'
)
|
mcarey-solstice/mock-security | src/cli.py | <reponame>mcarey-solstice/mock-security<gh_stars>1-10
#!/usr/bin/env python3
import sys
import argparse
from keychain import create_keychain, delete_keychain, default_keychain, set_keychain_settings, unlock_keychain, lock_keychain, import_certificate
parser = argparse.ArgumentParser(description='Mock security')
subparsers = parser.add_subparsers(help='sub-command help')
create_keychain_parser = subparsers.add_parser('create-keychain', help='Creates a keychain')
create_keychain_parser.add_argument('-p',
dest='password', help='The password for the keychain')
create_keychain_parser.add_argument('name', help='The keychain to create')
create_keychain_parser.set_defaults(func=create_keychain)
delete_keychain_parser = subparsers.add_parser('delete-keychain', help='Deletes a keychain')
delete_keychain_parser.add_argument('-p', help='The password for the keychain')
delete_keychain_parser.add_argument('name', help='The keychain to delete')
delete_keychain_parser.set_defaults(func=delete_keychain)
default_keychain_parser = subparsers.add_parser('default-keychain', help='Sets the default keychain')
default_keychain_parser.add_argument('-s', dest='name', help='The password for the keychain')
default_keychain_parser.set_defaults(func=default_keychain)
unlock_keychain_parser = subparsers.add_parser('unlock-keychain', help='Unlocks the keychain')
unlock_keychain_parser.add_argument('-p', dest='password', help='The password for the keychain')
unlock_keychain_parser.add_argument('name', help='The keychain to unlock')
unlock_keychain_parser.set_defaults(func=unlock_keychain)
lock_keychain_parser = subparsers.add_parser('lock-keychain', help='Locks the keychain')
lock_keychain_parser.add_argument('name', help='The keychain to unlock')
lock_keychain_parser.set_defaults(func=lock_keychain)
set_keychain_settings_parser = subparsers.add_parser('set-keychain-settings', help='Sets keychain settings')
set_keychain_settings_parser.add_argument('-t', dest='timeout', help='The timeout for the keychain')
set_keychain_settings_parser.add_argument('-l', dest='lock_on_sleep', action='store_true', help='Lock keychain when the system sleeps')
set_keychain_settings_parser.add_argument('name', help='The keychain location')
set_keychain_settings_parser.set_defaults(func=set_keychain_settings)
import_parser = subparsers.add_parser('import', help='Imports a secret')
import_parser.add_argument('-k', dest='name', help='The key chain to add this secret to')
import_parser.add_argument('-P', dest='passphrase', help='The passphrase for the secret')
import_parser.add_argument('-T', dest='applications', nargs='*', help='The applications that are allowed to use the certificate')
import_parser.add_argument('filepath', help='The certificate location')
import_parser.set_defaults(func=import_certificate)
if __name__ == '__main__':
    argv = sys.argv[1:]
    # With no sub-command, argparse prints help and exits.
    if len(argv) < 1:
        argv = ['--help']
    args = parser.parse_args(argv)
    # Bug fix: vars(args) includes the `func` handler itself, so handlers
    # without a **kwargs catch-all (e.g. delete_keychain(name)) crashed
    # with "unexpected keyword argument 'func'". Pop it before dispatch.
    # NOTE(review): delete-keychain's `-p` flag still is not accepted by
    # its handler signature -- confirm intended behavior.
    kwargs = dict(vars(args))
    func = kwargs.pop('func')
    func(**kwargs)
# fi
# cli
|
mcarey-solstice/mock-security | src/keychain.py | <reponame>mcarey-solstice/mock-security
###
#
##
import os
import sys
import json
from helpers import classproperty
def _get_keychain_file():
    """Path of the JSON persistence file (overridable via $KEYCHAIN_FILE)."""
    default_path = './keychains.json'
    return os.environ.get('KEYCHAIN_FILE', default_path)
class Keychain(object):
    """In-memory mock keychain with JSON persistence.

    Every instance registers itself in the class-level ``__keychains__``
    registry; the classmethods (find/load/save/...) operate on it.
    """

    __keychains__ = {}

    def __init__(self, name, **kwargs):
        """Create a keychain and register it under `name`."""
        self.name = name
        self.locked = kwargs.pop('locked', False)
        self.timeout = kwargs.pop('timeout', -1)
        self.owner = kwargs.pop('owner', None)
        self.default = kwargs.pop('default', False)
        self.password = kwargs.pop('password', None)
        self.lock_on_sleep = kwargs.pop('lock_on_sleep', False)
        # Accept Certificate instances or plain dicts (as loaded from JSON).
        self.certificates = [c if isinstance(c, Certificate) else Certificate(**c) for c in kwargs.pop('certificates', [])]
        # The first keychain ever created becomes the default.
        if len(Keychain.__keychains__) < 1:
            self.default = True
        Keychain.__keychains__[self.name] = self

    @classproperty
    def KEYCHAINS(cls):
        """All keychains keyed by name, as JSON-ready property dicts."""
        data = {}
        for key, val in cls.__keychains__.items():
            data[key] = val.__properties__
        return data

    @classproperty
    def DEFAULT(cls):
        """The keychain flagged as default, or None when there is none."""
        for k, v in cls.__keychains__.items():
            if v.default is True:
                return v

    @property
    def __properties__(self):
        """JSON-serializable snapshot of this keychain."""
        return {
            "name": self.name,
            "timeout": self.timeout,
            "owner": self.owner,
            "default": self.default,
            # Bug fix: this value was the literal `<PASSWORD>` (a redaction
            # artifact, not valid Python); persist the actual password so
            # unlock() still works after a load/save round trip.
            "password": self.password,
            "locked": self.locked,
            "lock_on_sleep": self.lock_on_sleep,
            "certificates": [c.__properties__ for c in self.certificates]
        }

    def unlock(self, **kwargs):
        """Unlock with `password=`; raises when the password mismatches."""
        if kwargs.pop('password', None) != self.password:
            raise Exception('Password does not match')
        self.locked = False

    def lock(self):
        """Mark the keychain as locked."""
        self.locked = True

    def import_certificate(self, filepath, **kwargs):
        """Attach a new certificate (see Certificate for kwargs)."""
        self.certificates.append(Certificate(filepath, **kwargs))

    @classmethod
    def find(cls, name, default=None):
        """Look up a keychain by name; fall back to `default` or raise."""
        if name in cls.__keychains__:
            return cls.__keychains__[name]
        if default is not None:
            return default
        raise Exception('No keychain `%s` found' % name)

    @classmethod
    def set_default(cls, name):
        """Move the default flag from the current default to `name`."""
        default = cls.DEFAULT
        if default is not None:
            default.default = False
        cls.find(name).default = True

    @classmethod
    def status(cls):
        # NOTE(review): cls.__dict__ contains methods and is not JSON
        # serializable -- cls.KEYCHAINS is probably what was intended.
        print(json.dumps(cls.__dict__))

    @classmethod
    def load(cls, filename):
        """Populate the registry from a JSON file, if it exists."""
        if os.path.isfile(filename):
            with open(filename, 'r') as f:
                data = {}
                try:
                    data = json.load(f)
                except json.decoder.JSONDecodeError as e:
                    # Corrupt/empty file: start from an empty registry.
                    print("Could not decode json for %s. Moving on" % filename)
                for key, val in data.get('keychains', {}).items():
                    cls.__keychains__[key] = Keychain(**val)

    @classmethod
    def save(cls, filename):
        """Write the whole registry to a JSON file, creating parent dirs."""
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        with open(filename, 'w') as f:
            json.dump({
                "keychains": cls.KEYCHAINS
            }, f, indent=2)

    @classmethod
    def delete(cls, name):
        """Remove the named keychain from the registry (raises if missing)."""
        obj = cls.find(name)
        del cls.__keychains__[obj.name]
# Keychain
class Certificate(object):
    """A secret imported into a keychain: a file path plus access metadata."""

    def __init__(self, filepath, **kwargs):
        self.filepath = filepath
        self.applications = kwargs.pop('applications', [])
        self.passphrase = kwargs.pop('passphrase', None)

    @property
    def __properties__(self):
        """JSON-serializable snapshot of this certificate."""
        return dict(
            filepath=self.filepath,
            applications=self.applications,
            passphrase=self.passphrase,
        )
# Cert
###
# Utility functions
##
def keychain_decorator(fn):
    """Wrap fn so the keychain registry is loaded before and saved after."""
    def wrapper(*args, **kwargs):
        Keychain.load(_get_keychain_file())
        result = fn(*args, **kwargs)
        # Re-read the path at save time, exactly as the original did.
        Keychain.save(_get_keychain_file())
        return result
    return wrapper
# keychain_decorator
@keychain_decorator
def create_keychain(name, **kwargs):
    """Create and register a new keychain (extra kwargs are absorbed)."""
    Keychain(name, **kwargs)
# create_keychain
@keychain_decorator
def default_keychain(name, **kwargs):
    """Flag the named keychain as default.

    NOTE(review): sets the flag directly instead of Keychain.set_default,
    so any previous default keeps its flag too -- confirm intent.
    """
    Keychain.find(name).default = True
# default_keychain
@keychain_decorator
def delete_keychain(name):
    """Remove the named keychain from the registry."""
    Keychain.delete(name)
# delete_keychain
@keychain_decorator
def import_keychain():
    """Not implemented yet; placeholder so the CLI surface is complete."""
    pass
# import_keychain
@keychain_decorator
def set_keychain_settings(name, **kwargs):
    """Set arbitrary attributes (timeout, lock_on_sleep, ...) on a keychain."""
    keychain = Keychain.find(name)
    for k, v in kwargs.items():
        setattr(keychain, k, v)
    # done
# set_keychain_settings
@keychain_decorator
def unlock_keychain(name, **kwargs):
    """Unlock the named keychain (expects password= in kwargs)."""
    Keychain.find(name).unlock(**kwargs)
# unlock_keychain
@keychain_decorator
def lock_keychain(name, **kwargs):
    """Lock the named keychain; extra kwargs are ignored."""
    Keychain.find(name).lock()
# lock_keychain
@keychain_decorator
def import_certificate(name=None, **kwargs):
    """Import a certificate into the named keychain, or into the default."""
    Keychain.find(name, Keychain.DEFAULT).import_certificate(**kwargs)
# import_certificate
if __name__ == '__main__':
Keychain.load(_get_keychain_file())
Keychain.status()
# fi
# Keychain
|
mcarey-solstice/mock-security | src/__init__.py | ###
#
##
from keychain import Keychain, create_keychain, delete_keychain, default_keychain, set_keychain_settings, unlock_keychain, lock_keychain, import_certificate
from cli import parser
#
|
Soostone/cassy | test/Test.py | from pycassa.types import *
from pycassa.system_manager import *
from pycassa.pool import ConnectionPool
from pycassa.columnfamily import ColumnFamily
def create_ks():
    """Create the `testing` column family and read back rows written by the
    Haskell test script.

    NOTE(review): Python 2 code (print statements, pycassa). Despite the
    name, this creates a column family, not a keyspace -- the `testing`
    keyspace must already exist; confirm.
    """
    # create test keyspace
    sys = SystemManager()
    comparator = CompositeType(LongType(), BytesType())
    sys.create_column_family("testing", "testing", comparator_type=comparator)
    pool = ConnectionPool('testing')
    cf = ColumnFamily(pool, 'testing')
    # Check the column added by the Haskell test script
    # print [k for k in cf.get_range()]
    # cf.insert("row2", {(125, 'oklahoma'): 'asdf'})
    print cf.get('row1')
    print cf.get('row2')
    # should see: OrderedDict([((125, 'oklahoma'), 'asdf')])
|
ethframe/descent | examples/calc.py | <reponame>ethframe/descent
from descent.case import CaseUnapply
from descent.helpers import parser_from_source
CALC_GRAMMAR = r"""
t<S> <- S~ _
t<S, T> <- t<S> T
etail<O, E> <- O^left E:right
lexpr<E, O> <- E etail<O, E>*
rexpr<E, O> <- E etail<O, this>?
unary<E, O> <- O this:expr / E
paren<O, E, C> <- t<O> E t<C>
oneopt<A, B> <- A B? / B
calc <- _ expr !.
expr <- p0
p0 <- lexpr<p1, t<"+", @Add> / t<"-", @Sub>>
p1 <- lexpr<p2, t<"*", @Mul> / t<"/", @Div>>
p2 <- rexpr<p3, t<"**", @Pow>>
p3 <- unary<p4, t<"-", @Neg>>
p4 <- num / paren<"(", expr, ")">
num <- @Int
"-"? ("0" / [1-9][0-9]*)
(oneopt<"."[0-9]+, [eE][-+]?[0-9]+> @Float^^)? _
_ <- ([ \t\r\n]*)~
"""
class Evaluator(CaseUnapply):
def add(self, left, right):
return self(left) + self(right)
def sub(self, left, right):
return self(left) - self(right)
def mul(self, left, right):
return self(left) * self(right)
def div(self, left, right):
return self(left) / self(right)
def pow(self, left, right):
return self(left) ** self(right)
def neg(self, val):
return -self(val)
def int(self, val):
return int(val)
def float(self, val):
return float(val)
def main():
    """Build the calc parser and evaluate two sample expressions."""
    calc_parser = parser_from_source(CALC_GRAMMAR)
    parse = calc_parser.parse
    evaluate = Evaluator()

    # Named def instead of an assigned lambda (PEP 8 E731); same behavior.
    def calc(source):
        return evaluate(parse(source))

    print(calc("(1 + 2) ** 2 - 2 * 3 + 11 / 2.0 + - (3 / 2)"))
    print(calc("2 ** 3 ** 2 + -3 * ----4"))
if __name__ == '__main__':
main()
|
ethframe/descent | descent/fixpoint.py | from descent.case import CaseUnapply1
class CaseFix(CaseUnapply1):
    """Case dispatcher whose `reference` nodes read from fixpoint state."""
    def __init__(self, fix, **kwargs):
        super().__init__(**kwargs)
        # `fix` is the State object driving the fixpoint iteration.
        self.fix = fix
    def reference(self, val, *args):
        # A grammar reference resolves to the current approximation.
        return self.fix.get(val, *args)
class State:
    """Mutable fixpoint-iteration state: per-rule memo tables keyed by args."""

    def __init__(self, keys, bot):
        self.bot = bot
        # One table per rule; each maps an args tuple to the current value.
        self.storage = dict((key, {}) for key in keys)
        self.changed = False

    def update(self, val, key, *args):
        """Record a new value for (key, args); flag change when it differs."""
        table = self.storage[key]
        if table[args] != val:
            table[args] = val
            self.changed = True

    def get(self, key, *args):
        """Current value for (key, args), seeding unseen entries with bottom."""
        table = self.storage[key]
        if args not in table:
            table[args] = self.bot
            self.changed = True
        return table[args]

    def next(self):
        """Report whether anything changed since the last sweep, and reset."""
        changed, self.changed = self.changed, False
        return changed

    def __iter__(self):
        """Yield every (rule, args) pair currently memoized."""
        for rule, rule_state in self.storage.items():
            # Snapshot keys: get() may add entries while we iterate.
            for args in list(rule_state):
                yield rule, args
def fix(case, bottom):
    """Build a fixpoint solver that iterates `case` over grammar rules."""
    def solver(gram, rules, *args, **kwargs):
        state = State(gram.keys(), bottom)
        op = case(state, **kwargs)
        # Seed the requested rules so the work set is non-empty.
        for rule in rules:
            state.get(rule, *args)
        # Re-evaluate every memoized (rule, args) pair until stable.
        while state.next():
            for rule, rule_args in state:
                state.update(op(gram[rule], *rule_args), rule, *rule_args)
        return state.storage
    return solver
def result_by_args(state, *args):
    """Project per-rule fixpoint results down to one argument tuple."""
    projected = {}
    for rule, res in state.items():
        if args in res:
            projected[rule] = res[args]
    return projected
ethframe/descent | descent/case.py | <gh_stars>1-10
class Case:
    """Visitor that dispatches on the lower-cased type name of its argument.

    Keyword arguments become instance attributes so handlers can carry
    shared context.
    """

    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)

    def __call__(self, val, *args):
        handler = getattr(self, type(val).__name__.lower())
        return handler(val, *args)
class CaseUnapply1(Case):
    """Case variant that unwraps the node via unapply1() before dispatch."""

    def __call__(self, val, *args):
        handler = getattr(self, type(val).__name__.lower())
        return handler(val.unapply1(), *args)
class CaseUnapply(Case):
    """Case variant that splats unapply()'s tuple into the handler."""

    def __call__(self, val, *args):
        handler = getattr(self, type(val).__name__.lower())
        return handler(*val.unapply(), *args)
|
ethframe/descent | examples/jsonparser.py | from descent.case import CaseUnapply1
from descent.helpers import parser_from_source
JSON_GRAMMAR = r"""
t<S> <- S~ _
t<S, T> <- t<S> T
list<I> <- (I:items (t<","> I:items)*)?
collection<T, O, I, C> <- t<O, T> list<I> t<C>
oneopt<A, B> <- A B? / B
json <- _ value !.
value <- string / number / object / array / true / false / null
object <- collection<@Object, "{", pair, "}">
pair <- @Pair string:key t<":"> value:value
array <- collection<@Array, "[", value, "]">
string <- '"'~ @String char::* '"'~ _
char <- @char (!["\\\b\f\t\r\n] . / "\\"~ ["\\/])
/ @escape "\\"~ ("b":"\b" / "f":"\f" / "t":"\t" /
"r":"\r" / "n":"\n")
/ @unicode "\\u"~ hex hex hex hex
hex <- [0-9a-fA-F]
number <- @Number
"-"? ("0" / [1-9][0-9]*)
(oneopt<"."[0-9]+, [eE][-+]?[0-9]+> @Float^^)? _
_
true <- t<"true", @True_>
false <- t<"false", @False_>
null <- t<"null", @Null>
_ <- ([ \t\r\n]*)~
"""
JSON_CONVERTERS = {
"unicode": lambda v: chr(int(v, 16))
}
class Converter(CaseUnapply1):
    """Convert the parsed JSON AST into plain Python values.

    Dispatch is by AST node type name; each handler receives the node's
    unapplied payload and recurses via self(...).
    """
    def object(self, val):
        # val is a sequence of Pair nodes with .key and .value children.
        return {self(p.key): self(p.value) for p in val}
    def number(self, val):
        return int(val)
    def float(self, val):
        return float(val)
    def string(self, val):
        return val
    def array(self, val):
        return [self(v) for v in val]
    def true_(self, val):
        return True
    def false_(self, val):
        return False
    def null(self, val):
        return None
def main():
    """Parse a sample JSON document and print the AST and converted value."""
    json_parser = parser_from_source(JSON_GRAMMAR, JSON_CONVERTERS)
    parsed = json_parser.parse("""
    {
        "some": 1,
        "json": [
            1,
            3.14,
            4,
            "tab\\tunicode\\u0010"
        ],
        "inner": {
            "bool": false,
            "val": null
        }
    }
    """)
    print(parsed)
    print(Converter()(parsed))
if __name__ == '__main__':
main()
|
ethframe/descent | descent/typeinference.py | <reponame>ethframe/descent
from collections import OrderedDict
from descent.asttypes import (
InvalidType, UnknownType, EmptyType, StringType,
NamedType, TokenType, NodeType, merge_types
)
from descent.fixpoint import CaseFix, fix
class TypeInference(CaseFix):
    """Abstract interpreter inferring AST node types for grammar expressions.

    Each handler maps (expression, current type) -> resulting type; `reg`
    (a Registry) collects every concrete type observed along the way.
    """
    def sequence(self, val, ctype):
        # Thread the type left-to-right through the sequence.
        for p in val:
            ctype = self(p, ctype)
        return ctype
    def choice(self, val, ctype):
        # Alternatives merge into a single (possibly union) type.
        return merge_types(self(p, ctype) for p in val)
    def node(self, val, ctype):
        if isinstance(ctype, InvalidType):
            return ctype
        self.reg.update(ctype)
        return NamedType(val)
    def top(self, val, ctype):
        a = self(val.expr, EmptyType())
        self.reg.update(ctype)
        return a.append(ctype, str(val.name))
    def append(self, val, ctype):
        a = self(val.expr, EmptyType())
        self.reg.update(a)
        return ctype.append(a, str(val.name))
    def splice(self, val, ctype):
        a = self(val, EmptyType())
        self.reg.update(a)
        return ctype.splice(a)
    def top_splice(self, val, ctype):
        a = self(val, EmptyType())
        self.reg.update(ctype)
        return a.splice(ctype)
    def ignore(self, val, ctype):
        # Operand is type-checked for validity but does not affect ctype.
        a = self(val, EmptyType())
        if isinstance(a, InvalidType):
            return a
        self.reg.update(a)
        return ctype
    # Lookahead operators behave like ignore: check operand, keep ctype.
    not_follow = follow = ignore
    def replace(self, val, ctype):
        a = self(val.expr, EmptyType())
        self.reg.update(a)
        return ctype.splice(StringType())
    def char(self, val, ctype):
        return ctype.splice(StringType())
    # All terminal matchers contribute plain string content.
    string = char_any = char_range = char
    def repeat(self, val, ctype):
        # Zero-or-more: iterate to a local fixed point.
        ntype = merge_types((ctype, self(val, ctype)))
        while ntype != ctype:
            ctype = ntype
            ntype = merge_types((ctype, self(val, ctype)))
        return ntype
    def repeat1(self, val, ctype):
        # One-or-more: first pass is mandatory, then the same fixed point.
        ntype = self(val, ctype)
        while ntype != ctype:
            ctype = ntype
            ntype = merge_types((ctype, self(val, ctype)))
        return ntype
    def optional(self, val, ctype):
        return merge_types((ctype, self(val, ctype)))
    def fail(self, val, ctype):
        self.reg.update(ctype)
        return UnknownType()
class Registry:
    """Accumulates and merges the concrete AST types seen during inference."""

    def __init__(self):
        self.types = OrderedDict()

    def add(self, tp):
        """Register one type, merging with a previously seen same-named type."""
        if not isinstance(tp, (NamedType, TokenType, NodeType)):
            return
        existing = self.types.get(tp.name)
        self.types[tp.name] = tp if existing is None else existing.merge(tp)

    def update(self, tps):
        """Register every type in an iterable."""
        for tp in tps:
            self.add(tp)
infer = fix(TypeInference, UnknownType())
def infer_types(gram):
    """Infer the set of AST types a grammar can produce.

    Runs fixpoint inference from the grammar's first rule. Returns the
    collected types in registration order, or None when the start rule's
    inferred type is invalid.
    """
    start = list(gram)[0]
    reg = Registry()
    inf = infer(gram, [start], EmptyType(), reg=reg)
    top = inf[start].get((EmptyType(),), ())
    if isinstance(top, InvalidType):
        return None
    reg.update(top)
    return list(reg.types.values())
|
ethframe/descent | tests/test_grammar_check.py | <filename>tests/test_grammar_check.py
import py.test
from descent.parser import parse_grammar
from descent.macro import expand_macros
from descent.grammarcheck import check_grammar
WF_NULL = (True, True, False)
WF_NOT_NULL = (True, False, False)
WF_NOT_NULL_INV = (True, False, True)
NOT_WF_NULL = (False, True, False)
NOT_WF_NULL_INV = (False, True, True)
NOT_WF_NOT_NULL = (False, False, False)
NOT_WF_NOT_NULL_INV = (False, False, True)
check_cases = [
("A <- 'a'", {"A": WF_NOT_NULL}),
("A <- .", {"A": WF_NOT_NULL}),
("A <- ''", {"A": WF_NULL}),
("A <- 'a':''", {"A": WF_NOT_NULL}),
("A <- [a-z0-9_]", {"A": WF_NOT_NULL}),
("A <- []", {"A": WF_NOT_NULL}),
("A <- 'a' A / ''", {"A": WF_NULL}),
("A <- 'a' A / 'a'", {"A": WF_NOT_NULL}),
("A <- 'a' A", {"A": WF_NOT_NULL}),
("A <- 'a'?", {"A": WF_NULL}),
("A <- 'a'*", {"A": WF_NULL}),
("A <- 'a'+", {"A": WF_NOT_NULL}),
("A <- @a", {"A": WF_NULL}),
("A <- @a:a", {"A": WF_NULL}),
("A <- (@a 'a'):a", {"A": WF_NOT_NULL}),
("A <- @a^a", {"A": WF_NULL}),
("A <- (@a 'a')^a", {"A": WF_NOT_NULL}),
("A <- @a::", {"A": WF_NULL}),
("A <- (@a 'a')::", {"A": WF_NOT_NULL}),
("A <- @a^^", {"A": WF_NULL}),
("A <- (@a 'a')^^", {"A": WF_NOT_NULL}),
("A <- @a~", {"A": WF_NULL}),
("A <- (@a 'a')~", {"A": WF_NOT_NULL}),
("A <- !'a'", {"A": WF_NULL}),
("A <- &'a'", {"A": WF_NOT_NULL}),
("A <- A", {"A": NOT_WF_NULL}),
("A <- A 'a' / 'a'", {"A": NOT_WF_NULL}),
("A <- !A", {"A": NOT_WF_NULL}),
("A <- A?", {"A": NOT_WF_NULL}),
("A <- A*", {"A": NOT_WF_NULL_INV}),
("A <- A+", {"A": NOT_WF_NOT_NULL_INV}),
("A <- ' ' ('')*", {"A": WF_NOT_NULL_INV}),
]
@py.test.mark.parametrize("input, result", check_cases)
def test_parse(input, result):
    """Each grammar snippet yields the expected per-rule flag triples."""
    assert check_grammar(expand_macros(parse_grammar(input))) == result
|
ethframe/descent | descent/codegen.py | import sys
from descent.asttypes import NamedType, TokenType, NodeType
def _indent(lines):
for line in lines:
if isinstance(line, list):
for indented in _indent(line):
yield " " + indented if indented else ""
else:
yield line
def _concat(lines):
return "\n".join(_indent(lines))
def _fmt_iter(fmt, it):
return [fmt.format(i) for i in it]
def _fmt_and_join(fmt, it, sep=", "):
return sep.join(_fmt_iter(fmt, it))
def _defm(name, args, body):
return ["def {}({}):".format(name, ", ".join(["self"] + args)), body, ""]
def gen_namedtype(tp):
return _concat([
"class {}:".format(tp.name),
_defm("__repr__", [], ["return '{}()'".format(tp.name)]),
_defm("__hash__", [], ["return hash(self.__class__)"]),
_defm("__eq__", ["other"], [
"return self.__class__ is other.__class__"
]),
_defm("unapply1", [], ["return self"]),
_defm("unapply", [], ["return (self,)"]),
_defm("copy", [], ["return self"]),
_defm("splice_to", ["other"], ["return other"]),
_defm("to_dict", [], ["return {{'__type__': {!r}}}".format(tp.name)]),
""
])
def gen_tokentype(tp):
    """Generate the source of a token class for *tp*: a mutable string
    wrapper that supports consume() (append matched text) and
    converter-aware splice_to()."""
    return _concat([
        "class {}:".format(tp.name),
        _defm("__init__", ["val=''"], ["self.val = val"]),
        _defm("__str__", [], ["return self.val"]),
        _defm("__repr__", [], [
            "return '{}({{!r}})'.format(self.val)".format(tp.name)
        ]),
        _defm("__hash__", [], ["return hash((self.__class__, self.val))"]),
        _defm("__eq__", ["other"], [
            "return self.__class__ is other.__class__"
            + " and self.val == other.val"
        ]),
        _defm("unapply1", [], ["return self.val"]),
        _defm("unapply", [], ["return (self.val,)"]),
        _defm("copy", [], ["return {}(self.val)".format(tp.name)]),
        _defm("consume", ["val"], ["self.val += val", "return self"]),
        # splice_to feeds the token's text into the target, optionally
        # transformed by a per-type converter (e.g. escape decoding).
        _defm("splice_to", ["other", "converters"], [
            "converter = converters.get('{}')".format(tp.name),
            "if converter:",
            ["return other.consume(converter(self.val))"],
            "return other.consume(self.val)"
        ]),
        _defm("to_dict", [], [
            "return {{'__type__': {!r}, 'value': self.val}}".format(tp.name)
        ]),
        ""
    ])
def gen_nodetype(tp):
    """Generate the source of a record-like node class for *tp*.

    For every field the class gets an ``append_<name>`` setter (list
    append when the field is an array, plain assignment otherwise), plus
    ``extend_<name>`` for array fields.  ``splice_to`` forwards each
    field into the target via those methods, skipping unset optional
    fields.  ``tp.fields`` maps names to Field(arr, opt) descriptors.
    """
    lines = [
        "class {}:".format(tp.name),
        [
            # Single-field slots need the trailing comma to stay a tuple.
            "__slots__ = ({!r},)".format(list(tp.fields)[0])
            if len(tp.fields) == 1 else
            "__slots__ = ({})".format(_fmt_and_join("{!r}", tp.fields)),
            ""
        ],
        _defm("__init__", _fmt_iter("{}=None", tp.fields), [
            (
                # Array fields default to a fresh list per instance.
                "self.{0} = {0} or []" if field.arr else "self.{0} = {0}"
            ).format(name)
            for name, field in tp.fields.items()
        ]),
        _defm("__repr__", [], [
            "return '{}({})'.format(".format(
                tp.name, ", ".join(["{!r}"] * len(tp.fields))
            ),
            _fmt_iter("self.{},", tp.fields),
            ")"
        ]),
        _defm("__eq__", ["other"], [
            "return (",
            ["self.__class__ is other.__class__"],
            _fmt_iter("and self.{0} == other.{0}", tp.fields),
            ")"
        ]),
        # Single-field nodes unwrap to their sole field value.
        _defm("unapply1", [], [
            "return self.{}".format(list(tp.fields)[0])
            if len(tp.fields) == 1 else
            "return self"
        ]),
        _defm("unapply", [], [
            "return (self.{},)".format(list(tp.fields)[0])
            if len(tp.fields) == 1 else
            "return ({})".format(_fmt_and_join("self.{}", tp.fields, ", "))
        ]),
        _defm("copy", [], [
            "return {}(".format(tp.name),
            [
                (
                    # Shallow-copy array fields so mutation doesn't alias.
                    "list(self.{})," if tp.fields[name].arr else "self.{},"
                ).format(name) for name in tp.fields
            ],
            ")"
        ])
    ]
    for name, field in tp.fields.items():
        lines.append(
            _defm("append_{}".format(name), ["val"], [
                (
                    "self.{}.append(val)" if field.arr else "self.{} = val"
                ).format(name),
                "return self"
            ])
        )
        if field.arr:
            lines.append(
                _defm("extend_{}".format(name), ["val"], [
                    "self.{}.extend(val)".format(name),
                    "return self"
                ])
            )
    # splice_to body: push every set field into the target node.
    body = []
    for name, field in tp.fields.items():
        if field.arr:
            body.append("other.extend_{0}(self.{0})".format(name))
        elif field.opt:
            body.extend([
                "if self.{} is not None:".format(name),
                ["other.append_{0}(self.{0})".format(name)]
            ])
        else:
            body.append("other.append_{0}(self.{0})".format(name))
    body.append("return other")
    lines.append(_defm("splice_to", ["other", "converters"], body))
    # to_dict body: recursive JSON-friendly dict representation.
    body = ["'__type__': {!r},".format(tp.name)]
    for name, field in tp.fields.items():
        if field.arr:
            body.append("{0!r}: [i.to_dict() for i in self.{0}],".format(name))
        elif field.opt:
            body.append(
                "{0!r}: None if self.{0} is None"
                " else self.{0}.to_dict(),".format(name)
            )
        else:
            body.append("{0!r}: self.{0}.to_dict(),".format(name))
    lines.extend([_defm("to_dict", [], ["return {", body, "}"]), ""])
    return _concat(lines)
def gen_python_class(tp):
    """Dispatch *tp* to the source generator for its exact asttypes class.

    Raises KeyError for any type that is not a NamedType, TokenType or
    NodeType (exact class match, not isinstance).
    """
    generators = {
        NamedType: gen_namedtype,
        TokenType: gen_tokentype,
        NodeType: gen_nodetype,
    }
    generator = generators[tp.__class__]
    return generator(tp)
def gen_types_map(types):
    """Emit the ``types_map`` dict literal mapping type names to their
    generated classes, one entry per line."""
    entries = ["    {0!r}: {0},".format(t.name) for t in types]
    return "\n".join(["types_map = {"] + entries + ["}"])
def gen_ast_module_src(types):
    """Build the full AST module source: one class per inferred type,
    followed by the types_map registry."""
    parts = [gen_python_class(t) for t in types]
    parts.append(gen_types_map(types))
    return "\n".join(parts)
def gen_ast_module(types):
    """Exec the generated source in a fresh module object and return its
    ``types_map`` dict."""
    # type(sys) is the module type -- avoids importing `types` just for this.
    fresh = type(sys)("ast")
    namespace = fresh.__dict__
    exec(gen_ast_module_src(types), namespace, namespace)
    return fresh.types_map
|
ethframe/descent | tests/test_type_inference.py | <gh_stars>1-10
from collections import OrderedDict as od
import py.test
from descent.parser import parse_grammar
from descent.macro import expand_macros
from descent.typeinference import infer_types
from descent.asttypes import NamedType, TokenType, NodeType, Field
type_cases = [
("A <- 'a'", set()),
("A <- @a", {NamedType("a")}),
("A <- @a 'a'", {TokenType("a")}),
("A <- @a .", {TokenType("a")}),
("A <- @a [a]", {TokenType("a")}),
("A <- @a [a-z]", {TokenType("a")}),
("A <- 'a' @a^^", {TokenType("a")}),
("A <- @a::", {NamedType("a")}),
("A <- @a @b:'a'", {TokenType("a"), NamedType("b")}),
("A <- @a / @b", {NamedType("a"), NamedType("b")}),
("A <- (@a / @b)::", {NamedType("a"), NamedType("b")}),
("A <- @a @b:a", {
NamedType("b"),
NodeType("a", od([("a", Field(False, False))]))
}),
("A <- @a (@b:a / @b:b)", {
NamedType("b"),
NodeType(
"a",
od([("a", Field(False, True)), ("b", Field(False, True))])
)
}),
("A <- @b @a^a", {
NamedType("b"),
NodeType("a", od([("a", Field(False, False))]))
}),
("A <- @a @b:a?", {
NamedType("b"),
NodeType("a", od([("a", Field(False, True))]))
}),
("A <- @a @b:a*", {
NamedType("b"),
NodeType("a", od([("a", Field(True, True))]))
}),
("A <- @a (@b:a / @b:b*)", {
NamedType("b"),
NodeType(
"a",
od([("a", Field(False, True)), ("b", Field(True, True))])
)
}),
("A <- @a @b:a+", {
NamedType("b"),
NodeType("a", od([("a", Field(True, False))]))
}),
("A <- @a A:a / @b", {
NamedType("b"),
NodeType("a", od([("a", Field(False, False))]))
}),
("A <- @a @b:a A:: / @a @b:a", {
NamedType("b"),
NodeType("a", od([("a", Field(True, False))]))
}),
("A <- @a~", {NamedType("a")}),
("A <- !@a", {NamedType("a")}),
("A <- &@a", {NamedType("a")}),
("A <- A:: / @a", {NamedType("a")}),
("A <- 'a' A:: / ''", set()),
("A <- 'a' 'a'::", set()),
("A <- @a 'a':a", None),
("A <- @a ('a'~):a", None),
("A <- @a (@b 'a'):a (@b 'a')::", None),
("A <- (@a 'a') (@b 'a'):a", None),
("A <- (@a 'a') (@b (@c 'a'):a)::", None),
("A <- @a @b:b? @b:a", {
NamedType("b"),
NodeType(
"a",
od([("b", Field(False, True)), ("a", Field(False, False))])
)
}),
("A <- @a @b:a @b:b / @a @b:b @b:a", {
NamedType("b"),
NodeType(
"a",
od([("a", Field(False, False)), ("b", Field(False, False))])
)
}),
("A <- A:: / @a 'a':a", None),
("A <- @a:a", None),
("A <- 'a' @a:a", None),
("A <- 'a' A:: / @a / @b", {NamedType("a"), NamedType("b")}),
("A <- 'a' ('a' @a:a)::", None),
("A <- @a A:: / @a 'a'", {TokenType("a")}),
("A <- @a A:: / @a (@b 'a'):a", {
TokenType("b"),
NodeType("a", od([("a", Field(False, False))]))
}),
("A <- @a A:: / @a", {NamedType("a")}),
("A <- @a A:: / (@a / @b)", {NamedType("a"), NamedType("b")}),
("A <- @a A:: / @a 'a':a", None),
]
@py.test.mark.parametrize("input, result", type_cases)
def test_parse(input, result):
    """infer_types yields the expected type set, or None for untypable
    grammars (None on both sides counts as a pass)."""
    types = infer_types(expand_macros(parse_grammar(input)))
    # The None/None branch short-circuits before set() would choke on None.
    assert types is None and result is None or set(types) == result
|
ethframe/descent | generate.py | <filename>generate.py
from descent.parser import parse_grammar
from descent.source import source, converters
from descent.macro import expand_macros
from descent.typeinference import infer_types
from descent.codegen import (
gen_python_class, gen_ast_module, gen_ast_module_src
)
from descent.combinators import compile_parser
def generate():
    """Bootstrap the self-hosted parser and regenerate its sources.

    Parses the PEG source with the checked-in parser, infers AST types,
    compiles a new parser from them, re-parses the source with that
    parser, then writes out descent/ast.py (generated classes) and
    descent/grammar.py (the grammar as an OrderedDict of rule bodies).
    """
    grammar = expand_macros(parse_grammar(source))
    types = infer_types(grammar)
    # Second pass: re-parse with a parser built from the freshly inferred
    # types so the emitted files are produced by their own toolchain.
    new_parser = compile_parser(grammar, gen_ast_module(types), converters)
    grammar = expand_macros(new_parser.parse(source))
    types = infer_types(grammar)
    with open("descent/ast.py", "w") as fp:
        fp.write(gen_ast_module_src(types))
    with open("descent/grammar.py", "w") as fp:
        fp.write("from collections import OrderedDict\n\n")
        fp.write(
            "from .ast import "
            + ", ".join(type_.name for type_ in types) + "\n\n\n"
        )
        fp.write("grammar = OrderedDict([\n")
        # Relies on each AST node's repr() being valid constructor syntax.
        for name, expr in grammar.items():
            fp.write("    ({!r}, {!r}),\n".format(name, expr))
        fp.write("])\n")


if __name__ == '__main__':
    generate()
|
ethframe/descent | tests/test_grammar_parser.py | import py.test
from descent.parser import parse_grammar
from descent.ast import *
def single_rule_grammar(name, body):
    """Build a grammar AST holding the single rule ``name <- body``."""
    only_rule = rule(name=reference(name), expr=body)
    return grammar([only_rule])
parse_cases = [
("A <- 'a'", single_rule_grammar("A", string("a"))),
("A <- \"a\"", single_rule_grammar("A", string("a"))),
("A <- '\\n'", single_rule_grammar("A", string("\n"))),
("A <- '\\010'", single_rule_grammar("A", string("\010"))),
("A <- [a]", single_rule_grammar("A", char("a"))),
("A <- [a-z]", single_rule_grammar(
"A", char_range(start=char("a"), end=char("z"))
)),
("A <- []", single_rule_grammar("A", fail())),
("A <- [ab]", single_rule_grammar("A", choice([char("a"), char("b")]))),
("A <- .", single_rule_grammar("A", char_any())),
("A <- .:'a'", single_rule_grammar("A", replace(char_any(), string("a")))),
("A <- B", single_rule_grammar("A", reference("B"))),
("A <- B / C", single_rule_grammar(
"A", choice([reference("B"), reference("C")])
)),
("A <- B*", single_rule_grammar("A", repeat(reference("B")))),
("A <- B+", single_rule_grammar("A", repeat1(reference("B")))),
("A <- B?", single_rule_grammar("A", optional(reference("B")))),
("A <- !B", single_rule_grammar("A", not_follow(reference("B")))),
("A <- &B", single_rule_grammar("A", follow(reference("B")))),
("A <- @B", single_rule_grammar("A", node("B"))),
("A <- B~", single_rule_grammar("A", ignore(reference("B")))),
("A <- B:a", single_rule_grammar(
"A", append(reference("B"), reference("a"))
)),
("A <- B^a", single_rule_grammar(
"A", top(reference("B"), reference("a"))
)),
("A <- B::", single_rule_grammar("A", splice(reference("B")))),
("A <- B^^", single_rule_grammar("A", top_splice(reference("B")))),
]
@py.test.mark.parametrize("input, parsed", parse_cases)
def test_parse(input, parsed):
    """The grammar parser produces the expected AST for each snippet."""
    assert parse_grammar(input) == parsed
|
ethframe/descent | descent/parser.py | from descent.ast import types_map
from descent.combinators import compile_parser
from descent.grammar import grammar
from descent.source import converters
# The parser for the PEG grammar language itself, compiled from the
# checked-in bootstrap grammar; parse_grammar(text) returns a grammar AST
# (see descent.ast) or None on failure.
parser = compile_parser(grammar, types_map, converters)
parse_grammar = parser.parse
|
ethframe/descent | descent/combinators.py | <gh_stars>1-10
from descent.case import CaseUnapply1
class Tree:
    """Base class for parse-result placeholders; stateless, so copy()
    can return self."""
    def copy(self):
        return self


class Empty(Tree):
    """Initial parse result: no AST node under construction yet."""
    pass


class Ignore(Tree):
    """Result sink used inside lookahead/ignore: consume() drops the
    matched text instead of accumulating it."""
    def consume(self, val):
        return self
class Rule:
    """A named grammar rule, defined in two phases so rules can refer to
    each other recursively: create all Rule objects first, then define()
    each body (see compile_parser)."""

    def __init__(self, name):
        self.name = name
        self.body = None

    def define(self, body):
        # body is a parser callable (stream, pos, tree) -> (pos, tree|None).
        self.body = body

    def __call__(self, stream, pos, tree):
        return self.body(stream, pos, tree)

    def parse(self, stream):
        # Run from position 0 with an Empty tree; return the tree (None on
        # failure).  NOTE(review): does not demand full input consumption.
        return self(stream, 0, Empty())[1]
def sequence(*subparsers):
    """Combinator: run *subparsers* in order, threading position and tree.

    Fails -- returning the starting position and None -- as soon as any
    subparser fails; otherwise returns the final position and tree.
    """
    def _parser(stream, pos, tree):
        start = pos
        cur_pos, cur_tree = pos, tree
        for sub in subparsers:
            cur_pos, cur_tree = sub(stream, cur_pos, cur_tree)
            if cur_tree is None:
                return start, None
        return cur_pos, cur_tree
    return _parser
def choice(*subparsers):
    """Combinator: ordered choice -- the first subparser that succeeds
    wins.  Every alternative gets a fresh copy of the tree so a failed
    attempt cannot leak partial mutations into the next one."""
    def _parser(stream, pos, tree):
        for alternative in subparsers:
            result = alternative(stream, pos, tree.copy())
            if result[1] is not None:
                return result
        return pos, None
    return _parser
def repeat(parser):
    """Combinator: zero-or-more -- apply *parser* until it fails.

    Never fails itself; returns the last successful (pos, tree) pair.
    """
    def _parser(stream, pos, tree):
        result = pos, tree
        while True:
            attempt = parser(stream, result[0], result[1])
            if attempt[1] is None:
                return result
            result = attempt
    return _parser
def repeat1(parser):
    """Combinator: one-or-more -- like repeat, but the first application
    must succeed or the whole combinator fails."""
    def _parser(stream, pos, tree):
        pos, tree = parser(stream, pos, tree)
        if tree is None:
            return pos, None
        result = pos, tree
        while True:
            attempt = parser(stream, result[0], result[1])
            if attempt[1] is None:
                return result
            result = attempt
    return _parser
def optional(parser):
    """Combinator: try *parser*; on failure keep the original state
    (never fails)."""
    def _parser(stream, pos, tree):
        attempt = parser(stream, pos, tree)
        if attempt[1] is None:
            return pos, tree
        return attempt
    return _parser
def not_follow(parser):
    """Combinator: negative lookahead -- succeed without consuming input
    only if *parser* fails here; its tree output is discarded via Ignore."""
    def _parser(stream, pos, tree):
        _, peeked = parser(stream, pos, Ignore())
        if peeked is None:
            return pos, tree
        return pos, None
    return _parser
def follow(parser):
    """Combinator: positive lookahead -- succeed without consuming input
    only if *parser* matches here; its tree output is discarded."""
    def _parser(stream, pos, tree):
        _, peeked = parser(stream, pos, Ignore())
        if peeked is None:
            return pos, None
        return pos, tree
    return _parser
def node(name, classes):
    """Combinator: discard the current tree and start a fresh *name* AST
    node (looked up once in *classes* at compile time)."""
    make_node = classes[name]
    def _parser(stream, pos, tree):
        return pos, make_node()
    return _parser
def append(parser, name):
    """Combinator: parse a subtree with *parser* (starting from Empty)
    and attach it to the current tree via its ``append_<name>`` method.
    Inside lookahead (Ignore tree) only the position advances."""
    method = "append_" + name
    def _parser(stream, pos, tree):
        new_pos, subtree = parser(stream, pos, Empty())
        if subtree is None:
            return pos, None
        if isinstance(tree, Ignore):
            return new_pos, tree
        attach = getattr(tree, method)
        return new_pos, attach(subtree)
    return _parser
def top(parser, name):
    """Combinator: parse a new top node and attach the *current* tree to
    it under *name* -- the inverse direction of append()."""
    method = "append_" + name
    def _parser(stream, pos, tree):
        new_pos, new_top = parser(stream, pos, Empty())
        if new_top is None:
            return pos, None
        if isinstance(tree, Ignore):
            return new_pos, tree
        attach = getattr(new_top, method)
        return new_pos, attach(tree)
    return _parser
def splice(parser, converters):
    """Combinator: parse a subtree and merge its contents into the
    current tree via splice_to (token text may pass through *converters*)."""
    def _parser(stream, pos, tree):
        parsed = parser(stream, pos, Empty())
        if parsed[1] is None:
            return pos, None
        return parsed[0], parsed[1].splice_to(tree, converters)
    return _parser
def top_splice(parser, converters):
    """Combinator: parse a new top tree and merge the *current* tree into
    it -- the inverse direction of splice()."""
    def _parser(stream, pos, tree):
        parsed = parser(stream, pos, Empty())
        if parsed[1] is None:
            return pos, None
        return parsed[0], tree.splice_to(parsed[1], converters)
    return _parser
def ignore(parser):
    """Combinator: run *parser* for its position only; whatever tree it
    builds is discarded and the incoming tree passes through untouched."""
    def _parser(stream, pos, tree):
        new_pos, matched = parser(stream, pos, Ignore())
        if matched is None:
            return pos, None
        return new_pos, tree
    return _parser
def replace(parser, value):
    """Combinator: match *parser* but feed the constant *value* into the
    tree instead of the matched text."""
    def _parser(stream, pos, tree):
        new_pos, matched = parser(stream, pos, Ignore())
        if matched is None:
            return pos, None
        return new_pos, tree.consume(value)
    return _parser
def char_sequence(val):
    """Combinator: match the literal string *val* and feed it to the tree
    via consume()."""
    size = len(val)
    def _parser(stream, pos, tree):
        end = pos + size
        if end <= len(stream) and stream.startswith(val, pos):
            return end, tree.consume(val)
        return pos, None
    return _parser
def char_range(start, end):
    """Combinator: match one character in the inclusive range
    [*start*, *end*] and feed it to the tree via consume()."""
    def _parser(stream, pos, tree):
        if pos >= len(stream):
            return pos, None
        ch = stream[pos]
        if start <= ch <= end:
            return pos + 1, tree.consume(ch)
        return pos, None
    return _parser
def char_any(stream, pos, tree):
    """Parser: match any single character; fails only at end of input."""
    if pos >= len(stream):
        return pos, None
    return pos + 1, tree.consume(stream[pos])


def fail(stream, pos, tree):
    """Parser: always fail without consuming input."""
    return pos, None
class Compiler(CaseUnapply1):
    """Compile a grammar AST expression into a parser combinator.

    One method per AST node type.  CaseUnapply1 presumably dispatches on
    the node's class name and passes the node's unapply1() payload as
    *val* -- NOTE(review): confirm against descent.case.  The keyword
    arguments given at construction (rules, classes, converters) are
    available as attributes; calling self(...) recursively compiles
    subexpressions.
    """

    def char_any(self, val):
        # The wildcard parser is a plain function, not a factory.
        return char_any

    def string(self, val):
        return char_sequence(val)

    def char(self, val):
        return char_sequence(val)

    def char_range(self, val):
        # val is a char_range node; str() extracts its char tokens' text.
        return char_range(str(val.start), str(val.end))

    def sequence(self, val):
        return sequence(*(self(v) for v in val))

    def choice(self, val):
        return choice(*(self(v) for v in val))

    def repeat(self, val):
        return repeat(self(val))

    def repeat1(self, val):
        return repeat1(self(val))

    def optional(self, val):
        return optional(self(val))

    def not_follow(self, val):
        return not_follow(self(val))

    def follow(self, val):
        return follow(self(val))

    def reference(self, val):
        # Rule objects are pre-created, so recursive refs resolve lazily.
        return self.rules[val]

    def node(self, val):
        return node(val, self.classes)

    def append(self, val):
        return append(self(val.expr), str(val.name))

    def top(self, val):
        return top(self(val.expr), str(val.name))

    def splice(self, val):
        return splice(self(val), self.converters)

    def top_splice(self, val):
        return top_splice(self(val), self.converters)

    def ignore(self, val):
        return ignore(self(val))

    def replace(self, val):
        return replace(self(val.expr), str(val.value))

    def fail(self, val):
        return fail
def compile_parser(gram, classes, converters=None):
    """Compile a grammar mapping into parser Rules; return the start rule.

    All Rule shells are created first so references -- including forward
    and recursive ones -- resolve, then each rule body is compiled and
    bound.  The start rule is the grammar's first entry.
    """
    rules = {}
    for name in gram:
        rules[name] = Rule(name)
    compiler = Compiler(
        rules=rules,
        classes=classes,
        converters=converters or {}
    )
    for name, body in gram.items():
        rules[name].define(compiler(body))
    return rules[next(iter(gram))]
|
ethframe/descent | descent/ast.py | class char:
def __init__(self, val=''):
self.val = val
def __str__(self):
return self.val
def __repr__(self):
return 'char({!r})'.format(self.val)
def __hash__(self):
return hash((self.__class__, self.val))
def __eq__(self, other):
return self.__class__ is other.__class__ and self.val == other.val
def unapply1(self):
return self.val
def unapply(self):
return (self.val,)
def copy(self):
return char(self.val)
def consume(self, val):
self.val += val
return self
def splice_to(self, other, converters):
converter = converters.get('char')
if converter:
return other.consume(converter(self.val))
return other.consume(self.val)
def to_dict(self):
return {'__type__': 'char', 'value': self.val}
class octal:
def __init__(self, val=''):
self.val = val
def __str__(self):
return self.val
def __repr__(self):
return 'octal({!r})'.format(self.val)
def __hash__(self):
return hash((self.__class__, self.val))
def __eq__(self, other):
return self.__class__ is other.__class__ and self.val == other.val
def unapply1(self):
return self.val
def unapply(self):
return (self.val,)
def copy(self):
return octal(self.val)
def consume(self, val):
self.val += val
return self
def splice_to(self, other, converters):
converter = converters.get('octal')
if converter:
return other.consume(converter(self.val))
return other.consume(self.val)
def to_dict(self):
return {'__type__': 'octal', 'value': self.val}
class string:
def __init__(self, val=''):
self.val = val
def __str__(self):
return self.val
def __repr__(self):
return 'string({!r})'.format(self.val)
def __hash__(self):
return hash((self.__class__, self.val))
def __eq__(self, other):
return self.__class__ is other.__class__ and self.val == other.val
def unapply1(self):
return self.val
def unapply(self):
return (self.val,)
def copy(self):
return string(self.val)
def consume(self, val):
self.val += val
return self
def splice_to(self, other, converters):
converter = converters.get('string')
if converter:
return other.consume(converter(self.val))
return other.consume(self.val)
def to_dict(self):
return {'__type__': 'string', 'value': self.val}
class reference:
def __init__(self, val=''):
self.val = val
def __str__(self):
return self.val
def __repr__(self):
return 'reference({!r})'.format(self.val)
def __hash__(self):
return hash((self.__class__, self.val))
def __eq__(self, other):
return self.__class__ is other.__class__ and self.val == other.val
def unapply1(self):
return self.val
def unapply(self):
return (self.val,)
def copy(self):
return reference(self.val)
def consume(self, val):
self.val += val
return self
def splice_to(self, other, converters):
converter = converters.get('reference')
if converter:
return other.consume(converter(self.val))
return other.consume(self.val)
def to_dict(self):
return {'__type__': 'reference', 'value': self.val}
class rule:
__slots__ = ('name', 'expr')
def __init__(self, name=None, expr=None):
self.name = name
self.expr = expr
def __repr__(self):
return 'rule({!r}, {!r})'.format(
self.name,
self.expr,
)
def __eq__(self, other):
return (
self.__class__ is other.__class__
and self.name == other.name
and self.expr == other.expr
)
def unapply1(self):
return self
def unapply(self):
return (self.name, self.expr)
def copy(self):
return rule(
self.name,
self.expr,
)
def append_name(self, val):
self.name = val
return self
def append_expr(self, val):
self.expr = val
return self
def splice_to(self, other, converters):
other.append_name(self.name)
if self.expr is not None:
other.append_expr(self.expr)
return other
def to_dict(self):
return {
'__type__': 'rule',
'name': self.name.to_dict(),
'expr': None if self.expr is None else self.expr.to_dict(),
}
class fail:
    """Generated AST singleton for the `[]` (always-fail) construct.

    NOTE: this file looks machine-generated (by descent.codegen via
    generate.py); this class is hand-patched pending regeneration.
    """

    def __repr__(self):
        return 'fail()'

    def __hash__(self):
        return hash(self.__class__)

    def __eq__(self, other):
        return self.__class__ is other.__class__

    def unapply1(self):
        return self

    def unapply(self):
        return (self,)

    def copy(self):
        return self

    def splice_to(self, other, converters=None):
        # Bug fix: the combinators call splice_to(tree, converters); the
        # old one-argument signature raised TypeError when spliced.  A
        # contentless singleton contributes nothing, so just return other.
        return other

    def to_dict(self):
        return {'__type__': 'fail'}
class char_any:
    """Generated AST singleton for the `.` (any character) construct.

    NOTE: this file looks machine-generated (by descent.codegen via
    generate.py); this class is hand-patched pending regeneration.
    """

    def __repr__(self):
        return 'char_any()'

    def __hash__(self):
        return hash(self.__class__)

    def __eq__(self, other):
        return self.__class__ is other.__class__

    def unapply1(self):
        return self

    def unapply(self):
        return (self,)

    def copy(self):
        return self

    def splice_to(self, other, converters=None):
        # Bug fix: the combinators call splice_to(tree, converters); the
        # old one-argument signature raised TypeError when spliced.  A
        # contentless singleton contributes nothing, so just return other.
        return other

    def to_dict(self):
        return {'__type__': 'char_any'}
class char_range:
__slots__ = ('start', 'end')
def __init__(self, start=None, end=None):
self.start = start
self.end = end
def __repr__(self):
return 'char_range({!r}, {!r})'.format(
self.start,
self.end,
)
def __eq__(self, other):
return (
self.__class__ is other.__class__
and self.start == other.start
and self.end == other.end
)
def unapply1(self):
return self
def unapply(self):
return (self.start, self.end)
def copy(self):
return char_range(
self.start,
self.end,
)
def append_start(self, val):
self.start = val
return self
def append_end(self, val):
self.end = val
return self
def splice_to(self, other, converters):
other.append_start(self.start)
other.append_end(self.end)
return other
def to_dict(self):
return {
'__type__': 'char_range',
'start': self.start.to_dict(),
'end': self.end.to_dict(),
}
class append:
__slots__ = ('expr', 'name')
def __init__(self, expr=None, name=None):
self.expr = expr
self.name = name
def __repr__(self):
return 'append({!r}, {!r})'.format(
self.expr,
self.name,
)
def __eq__(self, other):
return (
self.__class__ is other.__class__
and self.expr == other.expr
and self.name == other.name
)
def unapply1(self):
return self
def unapply(self):
return (self.expr, self.name)
def copy(self):
return append(
self.expr,
self.name,
)
def append_expr(self, val):
self.expr = val
return self
def append_name(self, val):
self.name = val
return self
def splice_to(self, other, converters):
other.append_expr(self.expr)
other.append_name(self.name)
return other
def to_dict(self):
return {
'__type__': 'append',
'expr': self.expr.to_dict(),
'name': self.name.to_dict(),
}
class top:
__slots__ = ('expr', 'name')
def __init__(self, expr=None, name=None):
self.expr = expr
self.name = name
def __repr__(self):
return 'top({!r}, {!r})'.format(
self.expr,
self.name,
)
def __eq__(self, other):
return (
self.__class__ is other.__class__
and self.expr == other.expr
and self.name == other.name
)
def unapply1(self):
return self
def unapply(self):
return (self.expr, self.name)
def copy(self):
return top(
self.expr,
self.name,
)
def append_expr(self, val):
self.expr = val
return self
def append_name(self, val):
self.name = val
return self
def splice_to(self, other, converters):
other.append_expr(self.expr)
other.append_name(self.name)
return other
def to_dict(self):
return {
'__type__': 'top',
'expr': self.expr.to_dict(),
'name': self.name.to_dict(),
}
class splice:
__slots__ = ('expr',)
def __init__(self, expr=None):
self.expr = expr
def __repr__(self):
return 'splice({!r})'.format(
self.expr,
)
def __eq__(self, other):
return (
self.__class__ is other.__class__
and self.expr == other.expr
)
def unapply1(self):
return self.expr
def unapply(self):
return (self.expr,)
def copy(self):
return splice(
self.expr,
)
def append_expr(self, val):
self.expr = val
return self
def splice_to(self, other, converters):
other.append_expr(self.expr)
return other
def to_dict(self):
return {
'__type__': 'splice',
'expr': self.expr.to_dict(),
}
class top_splice:
__slots__ = ('expr',)
def __init__(self, expr=None):
self.expr = expr
def __repr__(self):
return 'top_splice({!r})'.format(
self.expr,
)
def __eq__(self, other):
return (
self.__class__ is other.__class__
and self.expr == other.expr
)
def unapply1(self):
return self.expr
def unapply(self):
return (self.expr,)
def copy(self):
return top_splice(
self.expr,
)
def append_expr(self, val):
self.expr = val
return self
def splice_to(self, other, converters):
other.append_expr(self.expr)
return other
def to_dict(self):
return {
'__type__': 'top_splice',
'expr': self.expr.to_dict(),
}
class ignore:
__slots__ = ('expr',)
def __init__(self, expr=None):
self.expr = expr
def __repr__(self):
return 'ignore({!r})'.format(
self.expr,
)
def __eq__(self, other):
return (
self.__class__ is other.__class__
and self.expr == other.expr
)
def unapply1(self):
return self.expr
def unapply(self):
return (self.expr,)
def copy(self):
return ignore(
self.expr,
)
def append_expr(self, val):
self.expr = val
return self
def splice_to(self, other, converters):
other.append_expr(self.expr)
return other
def to_dict(self):
return {
'__type__': 'ignore',
'expr': self.expr.to_dict(),
}
class node:
def __init__(self, val=''):
self.val = val
def __str__(self):
return self.val
def __repr__(self):
return 'node({!r})'.format(self.val)
def __hash__(self):
return hash((self.__class__, self.val))
def __eq__(self, other):
return self.__class__ is other.__class__ and self.val == other.val
def unapply1(self):
return self.val
def unapply(self):
return (self.val,)
def copy(self):
return node(self.val)
def consume(self, val):
self.val += val
return self
def splice_to(self, other, converters):
converter = converters.get('node')
if converter:
return other.consume(converter(self.val))
return other.consume(self.val)
def to_dict(self):
return {'__type__': 'node', 'value': self.val}
class optional:
__slots__ = ('expr',)
def __init__(self, expr=None):
self.expr = expr
def __repr__(self):
return 'optional({!r})'.format(
self.expr,
)
def __eq__(self, other):
return (
self.__class__ is other.__class__
and self.expr == other.expr
)
def unapply1(self):
return self.expr
def unapply(self):
return (self.expr,)
def copy(self):
return optional(
self.expr,
)
def append_expr(self, val):
self.expr = val
return self
def splice_to(self, other, converters):
other.append_expr(self.expr)
return other
def to_dict(self):
return {
'__type__': 'optional',
'expr': self.expr.to_dict(),
}
class repeat:
__slots__ = ('expr',)
def __init__(self, expr=None):
self.expr = expr
def __repr__(self):
return 'repeat({!r})'.format(
self.expr,
)
def __eq__(self, other):
return (
self.__class__ is other.__class__
and self.expr == other.expr
)
def unapply1(self):
return self.expr
def unapply(self):
return (self.expr,)
def copy(self):
return repeat(
self.expr,
)
def append_expr(self, val):
self.expr = val
return self
def splice_to(self, other, converters):
other.append_expr(self.expr)
return other
def to_dict(self):
return {
'__type__': 'repeat',
'expr': self.expr.to_dict(),
}
class repeat1:
__slots__ = ('expr',)
def __init__(self, expr=None):
self.expr = expr
def __repr__(self):
return 'repeat1({!r})'.format(
self.expr,
)
def __eq__(self, other):
return (
self.__class__ is other.__class__
and self.expr == other.expr
)
def unapply1(self):
return self.expr
def unapply(self):
return (self.expr,)
def copy(self):
return repeat1(
self.expr,
)
def append_expr(self, val):
self.expr = val
return self
def splice_to(self, other, converters):
other.append_expr(self.expr)
return other
def to_dict(self):
return {
'__type__': 'repeat1',
'expr': self.expr.to_dict(),
}
class replace:
__slots__ = ('expr', 'value')
def __init__(self, expr=None, value=None):
self.expr = expr
self.value = value
def __repr__(self):
return 'replace({!r}, {!r})'.format(
self.expr,
self.value,
)
def __eq__(self, other):
return (
self.__class__ is other.__class__
and self.expr == other.expr
and self.value == other.value
)
def unapply1(self):
return self
def unapply(self):
return (self.expr, self.value)
def copy(self):
return replace(
self.expr,
self.value,
)
def append_expr(self, val):
self.expr = val
return self
def append_value(self, val):
self.value = val
return self
def splice_to(self, other, converters):
other.append_expr(self.expr)
other.append_value(self.value)
return other
def to_dict(self):
return {
'__type__': 'replace',
'expr': self.expr.to_dict(),
'value': self.value.to_dict(),
}
class follow:
__slots__ = ('expr',)
def __init__(self, expr=None):
self.expr = expr
def __repr__(self):
return 'follow({!r})'.format(
self.expr,
)
def __eq__(self, other):
return (
self.__class__ is other.__class__
and self.expr == other.expr
)
def unapply1(self):
return self.expr
def unapply(self):
return (self.expr,)
def copy(self):
return follow(
self.expr,
)
def append_expr(self, val):
self.expr = val
return self
def splice_to(self, other, converters):
other.append_expr(self.expr)
return other
def to_dict(self):
return {
'__type__': 'follow',
'expr': self.expr.to_dict(),
}
class not_follow:
__slots__ = ('expr',)
def __init__(self, expr=None):
self.expr = expr
def __repr__(self):
return 'not_follow({!r})'.format(
self.expr,
)
def __eq__(self, other):
return (
self.__class__ is other.__class__
and self.expr == other.expr
)
def unapply1(self):
return self.expr
def unapply(self):
return (self.expr,)
def copy(self):
return not_follow(
self.expr,
)
def append_expr(self, val):
self.expr = val
return self
def splice_to(self, other, converters):
other.append_expr(self.expr)
return other
def to_dict(self):
return {
'__type__': 'not_follow',
'expr': self.expr.to_dict(),
}
class choice:
__slots__ = ('items',)
def __init__(self, items=None):
self.items = items or []
def __repr__(self):
return 'choice({!r})'.format(
self.items,
)
def __eq__(self, other):
return (
self.__class__ is other.__class__
and self.items == other.items
)
def unapply1(self):
return self.items
def unapply(self):
return (self.items,)
def copy(self):
return choice(
list(self.items),
)
def append_items(self, val):
self.items.append(val)
return self
def extend_items(self, val):
self.items.extend(val)
return self
def splice_to(self, other, converters):
other.extend_items(self.items)
return other
def to_dict(self):
return {
'__type__': 'choice',
'items': [i.to_dict() for i in self.items],
}
class sequence:
__slots__ = ('items',)
def __init__(self, items=None):
self.items = items or []
def __repr__(self):
return 'sequence({!r})'.format(
self.items,
)
def __eq__(self, other):
return (
self.__class__ is other.__class__
and self.items == other.items
)
def unapply1(self):
return self.items
def unapply(self):
return (self.items,)
def copy(self):
return sequence(
list(self.items),
)
def append_items(self, val):
self.items.append(val)
return self
def extend_items(self, val):
self.items.extend(val)
return self
def splice_to(self, other, converters):
other.extend_items(self.items)
return other
def to_dict(self):
return {
'__type__': 'sequence',
'items': [i.to_dict() for i in self.items],
}
class expand:
__slots__ = ('name', 'args')
def __init__(self, name=None, args=None):
self.name = name
self.args = args or []
def __repr__(self):
return 'expand({!r}, {!r})'.format(
self.name,
self.args,
)
def __eq__(self, other):
return (
self.__class__ is other.__class__
and self.name == other.name
and self.args == other.args
)
def unapply1(self):
return self
def unapply(self):
return (self.name, self.args)
def copy(self):
return expand(
self.name,
list(self.args),
)
def append_name(self, val):
self.name = val
return self
def append_args(self, val):
self.args.append(val)
return self
def extend_args(self, val):
self.args.extend(val)
return self
def splice_to(self, other, converters):
other.append_name(self.name)
other.extend_args(self.args)
return other
def to_dict(self):
return {
'__type__': 'expand',
'name': self.name.to_dict(),
'args': [i.to_dict() for i in self.args],
}
class macro:
    """AST node for a macro definition: name, parameter list and body expression."""

    __slots__ = ('name', 'args', 'expr')

    def __init__(self, name=None, args=None, expr=None):
        self.name = name
        self.args = args if args else []
        self.expr = expr

    def __repr__(self):
        return 'macro({!r}, {!r}, {!r})'.format(self.name, self.args, self.expr)

    def __eq__(self, other):
        if type(self) is not type(other):
            return False
        return (self.name == other.name
                and self.args == other.args
                and self.expr == other.expr)

    def unapply1(self):
        return self

    def unapply(self):
        return (self.name, self.args, self.expr)

    def copy(self):
        # Shallow copy; only the args list itself is duplicated.
        return macro(self.name, list(self.args), self.expr)

    def append_name(self, val):
        self.name = val
        return self

    def append_args(self, val):
        self.args.append(val)
        return self

    def extend_args(self, val):
        self.args.extend(val)
        return self

    def append_expr(self, val):
        self.expr = val
        return self

    def splice_to(self, other, converters):
        other.append_name(self.name)
        other.extend_args(self.args)
        other.append_expr(self.expr)
        return other

    def to_dict(self):
        return {
            '__type__': 'macro',
            'name': self.name.to_dict(),
            'args': [child.to_dict() for child in self.args],
            'expr': self.expr.to_dict(),
        }
class grammar:
    """Root AST node: the ordered list of rule definitions of a grammar."""

    __slots__ = ('rules',)

    def __init__(self, rules=None):
        self.rules = rules if rules else []

    def __repr__(self):
        return 'grammar({!r})'.format(self.rules)

    def __eq__(self, other):
        if type(self) is not type(other):
            return False
        return self.rules == other.rules

    def unapply1(self):
        return self.rules

    def unapply(self):
        return (self.rules,)

    def copy(self):
        return grammar(list(self.rules))

    def append_rules(self, val):
        self.rules.append(val)
        return self

    def extend_rules(self, val):
        self.rules.extend(val)
        return self

    def splice_to(self, other, converters):
        other.extend_rules(self.rules)
        return other

    def to_dict(self):
        return {
            '__type__': 'grammar',
            'rules': [child.to_dict() for child in self.rules],
        }
# Registry mapping AST node-type names to the classes defined above, so the
# parser machinery can instantiate nodes by name.
types_map = {
    'char': char,
    'octal': octal,
    'string': string,
    'reference': reference,
    'rule': rule,
    'fail': fail,
    'char_any': char_any,
    'char_range': char_range,
    'append': append,
    'top': top,
    'splice': splice,
    'top_splice': top_splice,
    'ignore': ignore,
    'node': node,
    'optional': optional,
    'repeat': repeat,
    'repeat1': repeat1,
    'replace': replace,
    'follow': follow,
    'not_follow': not_follow,
    'choice': choice,
    'sequence': sequence,
    'expand': expand,
    'macro': macro,
    'grammar': grammar,
}
ethframe/descent | descent/grammarcheck.py | from collections import namedtuple
from descent.fixpoint import CaseFix, fix, result_by_args
state = namedtuple("State", "wf nul inv")
class GrammarCheck(CaseFix):
    """Grammar well-formedness analysis, run to a fixpoint over all rules.

    Each expression is summarized as a ``state(wf, nul, inv)``:
    ``wf``  -- the analysis has reached a well-formed result for it,
    ``nul`` -- it may succeed without consuming input,
    ``inv`` -- it is invalid (e.g. repetition over a nullable expression).

    NOTE(review): early returns of ``self.fix.bot`` also skip visiting the
    remaining children, which matters for the fixpoint's dependency
    tracking -- do not reorder these loops.
    """

    def string(self, val):
        # Only the empty literal is nullable.
        return state(True, not val, False)

    def char(self, val):
        return state(True, False, False)

    def char_any(self, val):
        return state(True, False, False)

    def char_range(self, val):
        return state(True, False, False)

    def sequence(self, val):
        nul = True
        inv = False
        for p in val:
            s = self(p)
            # While the prefix is still nullable, every part must already be
            # well-formed; otherwise the whole sequence drops to bottom.
            if nul and not s.wf:
                return self.fix.bot
            nul &= s.nul
            inv |= s.inv
        return state(True, nul, inv)

    def choice(self, val):
        nul = False
        for p in val:
            s = self(p)
            if not s.wf:
                return self.fix.bot
            # A choice is nullable if any alternative is.
            nul |= s.nul
        return state(True, nul, False)

    def not_follow(self, val):
        s = self(val)
        return state(s.wf, not (s.wf and s.nul), s.inv)

    def follow(self, val):
        return self(val)

    def optional(self, val):
        s = self(val)
        return state(s.wf, True, s.inv)

    def repeat(self, val):
        s = self(val)
        # Repeating a nullable expression would never terminate -> invalid.
        return state(s.wf and not s.nul, True, s.inv or s.nul)

    def repeat1(self, val):
        s = self(val)
        return state(s.wf and not s.nul, False, s.inv or s.nul)

    def node(self, val):
        return state(True, True, False)

    def append(self, val):
        return self(val.expr)

    def top(self, val):
        return self(val.expr)

    def splice(self, val):
        return self(val)

    def top_splice(self, val):
        return self(val)

    def ignore(self, val):
        return self(val)

    def replace(self, val):
        return self(val.expr)

    def fail(self, val):
        return state(True, False, False)
check = fix(GrammarCheck, state(False, True, False))
def check_grammar(gram):
    """Run the well-formedness analysis, seeded from the grammar's first rule."""
    start = list(gram)[0]
    return result_by_args(check(gram, [start]))
def get_invalid(result):
    """Return the names of rules whose analysis result is marked invalid."""
    bad = []
    for name, res in result.items():
        if res.inv:
            bad.append(name)
    return bad
|
ethframe/descent | descent/source.py | source = r"""
Args<Item> <- MOPEN (Item:args (COMMA Item:args)*)? MCLOSE
LeftOp<Arg, Between, Type> <- Arg (Type^items (Between Arg:items)+)?
LeftOp<Arg, Type> <- Arg (Type^items Arg:items+)?
Grammar <- @grammar Spacing Definition:rules+ EndOfFile
Definition <- @rule Identifier:name (@macro^^ Args<Identifier>)?
LEFTARROW Expression:expr
Expression <- LeftOp<Sequence, SLASH, @choice>
Sequence <- LeftOp<Prefix, @sequence>
Prefix <- (AND / NOT) Suffix:expr / Suffix
Suffix <- AstOp (QUESTION / STAR / PLUS)^expr?
AstOp <- Primary ((APPEND / TOP)^expr Identifier:name /
REPLACE^expr Literal:value /
(SPLICE / TOPSPLICE / IGNORE)^expr)?
Primary <- Identifier (@expand^name Args<Expression>)? !LEFTARROW
/ OPEN Expression CLOSE / Literal / Class / Any / Node
Identifier <- @reference IdentStart IdentCont* Spacing
IdentStart <- [a-zA-Z_]
IdentCont <- IdentStart / [0-9]
Node <- @node "@"~ IdentStart IdentCont* Spacing
String<Q> <- Q~ (!Q Char::)* Q~ Spacing
Literal <- @string (String<'"'> / String<"'">)
Class <- "["~ (!"]" LeftOp<Range, !"]", @choice> / @fail) "]"~ Spacing
Range <- @char_range Char:start "-"~ Char:end / Char
Char <- @char char::
char <- @char (!"\\" .
/ "\\"~ (['\"\[\]\\\-] / "b":"\b" / "f":"\f"
/ "n":"\n" / "r":"\r" / "t":"\t"))
/ @octal "\\"~ ([0-2][0-7][0-7] / [0-7][0-7]?)
Any <- DOT
Token<S> <- S~ Spacing
Token<S, T> <- S~ Spacing T
LEFTARROW <- Token<"<-">
SLASH <- Token<"/">
AND <- Token<"&", @follow>
NOT <- Token<"!", @not_follow>
QUESTION <- Token<"?", @optional>
STAR <- Token<"*", @repeat>
PLUS <- Token<"+", @repeat1>
DOT <- Token<".", @char_any>
APPEND <- Token<":", @append>
REPLACE <- Token<":", @replace>
TOP <- Token<"^", @top>
SPLICE <- Token<"::", @splice>
TOPSPLICE <- Token<"^^", @top_splice>
IGNORE <- Token<"~", @ignore>
OPEN <- Token<"(">
CLOSE <- Token<")">
MOPEN <- Token<"<">
MCLOSE <- Token<">">
COMMA <- Token<",">
Spacing <- (Space / Comment)*
Comment <- "#"~ (!EndOfLine .~)* EndOfLine
Space <- [ \t]~ / EndOfLine
EndOfLine <- "\r\n"~ / [\r\n]~
EndOfFile <- !.
"""
# Post-parse converters for the bootstrap grammar above: the text matched by
# an @octal node (e.g. "101") is converted to the character it denotes.
converters = {
    "octal": lambda v: chr(int(v, 8))
}
|
ethframe/descent | descent/macro.py | <reponame>ethframe/descent
from collections import OrderedDict
from descent.case import Case
class Macroexpander(Case):
    """Pass that inlines macro invocations in a parsed grammar.

    Walks the AST, records macro definitions in ``self.macro_env`` keyed by
    ``(name, arity)``, and rewrites every ``expand`` node into a copy of the
    macro body with its parameters substituted via ``self.copying_expander``.
    """

    def grammar(self, val, env):
        # Macro definitions are consumed (their handler returns None); only
        # real rules end up in the resulting name -> expression mapping.
        rules = OrderedDict()
        for entry in val.rules:
            res = self(entry, env)
            if res:
                rules[str(res.name)] = res.expr
        return rules

    def macro(self, val, env):
        val.expr = self(val.expr, env)
        self.macro_env[(str(val.name), len(val.args))] = val
        return None

    def rule(self, val, env):
        new_env = dict(env)
        # "this" lets a rule body refer to itself (e.g. for recursion).
        new_env["this"] = val.name
        val.expr = self(val.expr, new_env)
        return val

    def expand(self, val, env):
        # Fix: the original loop shadowed both the `val` parameter and the
        # `macro` dispatch method with locals; renamed for clarity (same
        # behavior). Bind each parameter to its expanded actual argument,
        # then expand a fresh copy of the macro body in that environment.
        new_env = dict(env)
        definition = self.macro_env[(str(val.name), len(val.args))]
        for param, actual in zip(definition.args, val.args):
            new_env[str(param)] = self(actual, env)
        return self.copying_expander(definition.expr.copy(), new_env)

    def _expand_expr(self, val, env):
        # Generic handler for single-child wrapper nodes.
        val.expr = self(val.expr, env)
        return val

    repeat1 = repeat = optional = _expand_expr
    follow = not_follow = _expand_expr
    replace = ignore = top_splice = splice = top = append = _expand_expr

    def _expand_items(self, val, env):
        # Generic handler for multi-child nodes.
        val.items = [self(item, env) for item in val.items]
        return val

    choice = sequence = _expand_items

    def reference(self, val, env):
        # Substitute a bound macro parameter; free references pass through.
        return env.get(str(val), val)

    def _expand_none(self, val, env):
        # Leaf nodes are returned unchanged.
        return val

    string = char_any = char_range = char = _expand_none
    fail = node = _expand_none
class Copyingexpander(Case):
    """Expansion pass that deep-copies every node it rewrites.

    Used for macro bodies so each expansion site gets its own AST instances.
    """

    def _expand_expr(self, val, env):
        # Copy the child before descending so the template is never mutated.
        val.expr = self(val.expr.copy(), env)
        return val

    repeat1 = repeat = optional = _expand_expr
    follow = not_follow = _expand_expr
    replace = ignore = top_splice = splice = top = append = _expand_expr

    def _expand_items(self, val, env):
        expanded = []
        for item in val.items:
            expanded.append(self(item.copy(), env))
        val.items = expanded
        return val

    choice = sequence = _expand_items

    def reference(self, val, env):
        # Substitute a bound macro parameter; free references pass through.
        return env.get(str(val), val)

    def _expand_none(self, val, env):
        return val

    string = char_any = char_range = char = _expand_none
    fail = node = _expand_none
def expand_macros(grammar):
    """Expand all macro definitions and invocations in a parsed grammar AST."""
    macro_env = {}
    # Both passes share the macro environment; the copying pass is used for
    # instantiating macro bodies.
    copier = Copyingexpander(macro_env=macro_env)
    expander = Macroexpander(macro_env=macro_env, copying_expander=copier)
    return expander(grammar, {})
|
ethframe/descent | descent/helpers.py | <reponame>ethframe/descent
from descent.codegen import gen_ast_module
from descent.combinators import compile_parser
from descent.grammarcheck import check_grammar, get_invalid
from descent.parser import parse_grammar
from descent.macro import expand_macros
from descent.typeinference import infer_types
def parser_from_source(src, converters=None):
    """Compile grammar source text into a ready-to-use parser.

    Pipeline: parse -> expand macros -> well-formedness check -> type
    inference -> AST module generation -> parser compilation.

    Raises:
        ValueError: if the grammar contains invalid rules or the inferred
            AST types are inconsistent.
    """
    grammar = expand_macros(parse_grammar(src))
    bad_rules = get_invalid(check_grammar(grammar))
    if bad_rules:
        msg = "Following rules are invalid: {}".format(", ".join(bad_rules))
        raise ValueError(msg)
    types = infer_types(grammar)
    if types is None:
        raise ValueError("Got invalid type")
    return compile_parser(grammar, gen_ast_module(types), converters)
|
ethframe/descent | examples/dynamic.py | <gh_stars>1-10
from itertools import product
from descent.case import CaseUnapply
from descent.helpers import parser_from_source
DYNAMIC_GRAMMAR = r"""
Start<B> <- _ B !.
Tok<S> <- S~ _
Tok<S, T> <- Tok<S> T
KW<S> <- S~ !ident _
Op<S> <- @Op S _
LExpr<E, O, R> <- E ((@BinOp O:op)^left R:right)*
LExpr<E, O> <- LExpr<E, O, E>
RExpr<E, O, S> <- E (O^left S:right)?
RExpr<E, O> <- RExpr<E, O, this>
UExpr<E, O> <- @UnaryOp O:op this:expr / E
Paren<O, E, C> <- Tok<O> E Tok<C>
List<I, D> <- (I (Tok<D> I)*)?
OneOpt<A, B> <- A B? / B
Program <- Start<Statements>
Statements <- @Statements Statement:statements*
Empty <- @Statements
Statement <- Condition / Assignment
Block <- Paren<"{", Statements, "}">
Assignment <- @Assignment Variable:lvalue Tok<"="> Expression:rvalue
Condition <- @Condition
KW<"if"> Expression:predicate
Block:true
(KW<"else"> (Condition / Block) / Empty):false
Expression <- P5
P5 <- LExpr<P4, Op<"==":"eq" / "!=":"neq">>
P4 <- LExpr<P3, Op<"+":"add" / "-":"sub">>
P3 <- LExpr<P2, Op<"*":"mul" / "/":"div">>
P2 <- UExpr<P1, Op<"-":"neg">>
P1 <- LExpr<P0, Op<"":"index">, Paren<"[", Expression, "]">>
P0 <- Call / Variable / Number / String / Paren<"(", Expression, ")">
Call <- @Call (@Name identifier):name
Paren<"(", List<Expression:args, ","> ,")">
Number <- @Integer "-"? ("0" / [1-9][0-9]*)
(OneOpt<"."[0-9]+, [eE][-+]?[0-9]+> @Float^^ @Float^^)? _
Variable <- @Variable !keywords identifier _
String <- '"'~ @String char* '"'~ _
identifier <- ident_start ident*
ident_start <- [a-zA-Z_]
ident <- [a-zA-Z0-9_]
char <- "\\"~ (["\\/] / "b":"\b" / "f":"\f" /
"t":"\t" / "r":"\r" / "n":"\n")
/ !["\\\b\f\t\r\n] .
keywords <- ("if" / "else") !ident
_ <- ([ \t\r\n]*)~
"""
TYPES_GRAMMAR = r"""
Start<B> <- _ B !.
Tok<S> <- S~ _
List<I, D> <- I (Tok<D> I)*
Types <- Start<@Types Element:elements*>
Element <- Function / Type
Type <- @Type Name:name (Tok<"<:"> Name:base)?
Function <- @Function List<Name:names, "|"> Tok<":"> List<FType:types, "|">
FType <- @FType (Tok<"()"> / List<Name:args, ",">) Tok<"->"> Name:result
Name <- @Name [a-zA-Z_][a-zA-Z0-9_]* _
_ <- ([ \t\r\n]*)~
"""
types_parser = parser_from_source(TYPES_GRAMMAR)
class TypesDef(CaseUnapply):
    """Builds a type environment from a parsed type-definition AST.

    ``self.env`` maps a type name to its base type name (a root type maps to
    itself), and a function name to a dict of
    ``(arg type names, ...) -> result type name`` overloads.
    """

    def types(self, items):
        for item in items:
            self(item)
        return self.env

    def type(self, name, base):
        name = str(name)
        if name in self.env:
            raise TypeError("Duplicate definition: {}".format(name))
        if base is None:
            # A root type is its own base; this terminates ancestor walks.
            self.env[name] = name
        else:
            base = str(base)
            if base not in self.env:
                raise TypeError("Not defined: {}".format(base))
            self.env[name] = base

    def function(self, names, types):
        # Normalize signatures to plain string tuples before validating.
        types = {
            tuple(str(arg) for arg in tp.args): str(tp.result)
            for tp in types
        }
        # Every argument and result type must already be declared.
        for args, result in types.items():
            if result not in self.env:
                raise TypeError("Not defined: {}".format(result))
            for arg in args:
                if arg not in self.env:
                    raise TypeError("Not defined: {}".format(arg))
        names = [str(name) for name in names]
        for name in names:
            if name in self.env:
                # Merge new overloads into an existing function entry,
                # rejecting duplicate signatures.
                sigs = self.env[name]
                for args, result in types.items():
                    if args in sigs:
                        raise TypeError(
                            "Duplicate definition: {}".format(name)
                        )
                    sigs[args] = result
            else:
                # Each name gets its own copy so later merges stay independent.
                self.env[name] = dict(types)
class Definitions:
    """Queries over a type environment produced by TypesDef."""

    def __init__(self, source):
        self.env = TypesDef(env={})(types_parser.parse(source))

    def seq(self, t):
        """Yield ``t`` followed by each ancestor up to the self-based root."""
        yield t
        while True:
            base = self.env[t]
            if base == t:
                return
            t = base
            yield t

    def common(self, t1, t2):
        """Return the nearest common ancestor of two types."""
        ancestors = set(self.seq(t1))
        while t2 not in ancestors:
            t2 = self.env[t2]
        return t2

    def func(self, name, args):
        """Resolve an overloaded function for ``args``.

        Each argument is widened along its ancestor chain until a declared
        signature matches; raises TypeError if none does.
        """
        signatures = self.env[name]
        for candidate in product(*(self.seq(t) for t in args)):
            if candidate in signatures:
                return signatures[candidate]
        raise TypeError(name, args, signatures)
class Types(CaseUnapply):
    """Static type checker for the toy language.

    ``self.env`` maps variable names to inferred type names; ``_types``
    holds the class hierarchy and the builtin operator/function signatures.
    """

    # Shared, parsed once at class-creation time.
    _types = Definitions("""
        dynamic
        string <: dynamic
        float <: dynamic
        integer <: float
        bool <: dynamic
        dict <: dynamic
        add | sub | mul | div : float, float -> float
        add | sub | mul : integer, integer -> integer
        add : string, string -> string
        neg : float -> float
            | integer -> integer
        eq | neq : dynamic, dynamic -> bool
        to_string : integer -> string
        index : dict, dynamic -> dynamic
        environment : () -> dict
    """)

    def statements(self, statements):
        for statement in statements:
            self(statement)

    def assignment(self, lvalue, rvalue):
        var = str(lvalue)
        value = self(rvalue)
        # Widen the variable's type to cover both its old and new values.
        self.env[var] = self._types.common(self.env.get(var, value), value)

    def integer(self, value):
        return "integer"

    def float(self, value):
        return "float"

    def string(self, value):
        return "string"

    def variable(self, value):
        return self.env[value]

    def binop(self, op, left, right):
        return self._types.func(str(op), (self(left), self(right)))

    def unaryop(self, op, expr):
        return self._types.func(str(op), (self(expr),))

    def condition(self, predicate, true, false):
        if self(predicate) != "bool":
            raise TypeError(predicate)
        # Both branches are checked regardless of the predicate.
        self(true)
        self(false)

    def call(self, name, args):
        return self._types.func(str(name), tuple(self(arg) for arg in args))
class Evaluate(CaseUnapply):
    """Tree-walking interpreter for the toy language.

    Variable bindings live in ``self.env``.
    """

    def statements(self, statements):
        for stmt in statements:
            self(stmt)

    def assignment(self, lvalue, rvalue):
        self.env[str(lvalue)] = self(rvalue)

    def integer(self, value):
        return int(value)

    def float(self, value):
        return float(value)

    def string(self, value):
        return value

    def variable(self, value):
        return self.env[value]

    def binop(self, op, left, right):
        handlers = {
            "mul": lambda a, b: a * b,
            "div": lambda a, b: a / b,
            "add": lambda a, b: a + b,
            "sub": lambda a, b: a - b,
            "eq": lambda a, b: a == b,
            "neq": lambda a, b: a != b,
            "index": lambda a, b: a[b],
        }
        handler = handlers[str(op)]
        # Both operands are evaluated strictly, left before right.
        return handler(self(left), self(right))

    def unaryop(self, op, expr):
        handler = {"neg": lambda a: -a}[str(op)]
        return handler(self(expr))

    def condition(self, predicate, true, false):
        # Note: the predicate must evaluate to the bool True exactly.
        branch = true if self(predicate) is True else false
        self(branch)

    def call(self, name, args):
        builtins = {
            "to_string": str,
            "environment": lambda: self.env,
        }
        fn = builtins[str(name)]
        return fn(*(self(arg) for arg in args))
def main():
    """Demo: parse a small program, type-check it, then evaluate it."""
    dynamic_parser = parser_from_source(DYNAMIC_GRAMMAR)
    parsed = dynamic_parser.parse(r"""
    a = 3
    b = a
    c = "foo" + "bar"
    d = a * 3
    e = 3
    a = -(d + 2) * 0.3 + 1
    f = 2 / 3
    g = 1
    g = "1"
    flag = 3
    if flag * 2 == 2 {
        var = flag * 12
    } else if flag != 2 {
        var = flag / 2
    } else {
        var = to_string(flag - 1) + "a"
    }
    if flag != 0 {
        env = environment()
    }
    item = env["flag"]
    """)
    print(parsed)
    # Type-check first; the checker widens each variable's type across
    # assignments (e.g. g becomes dynamic after int then string).
    env = {}
    Types(env=env)(parsed)
    print(env)
    # Then interpret the same AST with a fresh environment.
    env = {}
    Evaluate(env=env)(parsed)
    print(env)
|
ethframe/descent | descent/asttypes.py | <reponame>ethframe/descent
from collections import namedtuple, OrderedDict
class Type:
    """Base class for inferred AST types.

    Iterating a type yields its alternatives; a non-union type is its own
    single alternative (OrType overrides this).
    """

    def __iter__(self):
        return iter((self,))
class ScalarType(Type):
    """A type with no internal structure; equality is purely by class."""

    def __eq__(self, other):
        return type(self) is type(other)

    def __hash__(self):
        return hash(type(self))

    def merge(self, other):
        # Two distinct scalar kinds have no merged form.
        return None
class UnknownType(ScalarType):
    """A not-yet-determined type; it is preserved by all operations."""

    def __repr__(self):
        return '?'

    def append(self, other, name):
        return self

    def splice(self, other):
        return self

    def flat(self):
        return self
class InvalidType(ScalarType):
    """An inconsistent type; once reached, every operation stays invalid."""

    def __repr__(self):
        return '#'

    def append(self, other, name):
        return self

    def splice(self, other):
        return self

    def flat(self):
        return self
class EmptyType(ScalarType):
    """The type of a result that captured nothing (no node, no text)."""

    def __repr__(self):
        return '()'

    def append(self, other, name):
        # A named field can only be attached to a node result.
        return InvalidType()

    def splice(self, other):
        if isinstance(other, UnknownType):
            return other
        if isinstance(other, (EmptyType, NamedType)):
            return self
        if isinstance(other, (StringType, TokenType)):
            # Splicing text into an empty result yields plain text.
            return StringType()
        if isinstance(other, OrType):
            # Distribute over union alternatives.
            return merge_types(self.splice(t) for t in other)
        return InvalidType()

    def flat(self):
        return InvalidType()
class StringType(ScalarType):
    """The type of plain captured text with no node attached."""

    def __repr__(self):
        return '{}()'.format(self.__class__.__name__)

    def append(self, other, name):
        # Text cannot hold named fields.
        return InvalidType()

    def splice(self, other):
        if isinstance(other, UnknownType):
            return other
        if isinstance(other, (EmptyType, NamedType, StringType, TokenType)):
            # Splicing more text (or nothing) keeps the result textual.
            return self
        if isinstance(other, OrType):
            return merge_types(self.splice(t) for t in other)
        return InvalidType()

    def flat(self):
        return InvalidType()
class NamedType(Type):
    """A bare node type with a name but no fields or text yet."""

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return self.name

    def __eq__(self, other):
        return self.__class__ is other.__class__ and self.name == other.name

    def __hash__(self):
        return hash((self.__class__, self.name))

    def append(self, other, name):
        if isinstance(other, UnknownType):
            return other
        # The appended value is flattened to its node identity first.
        other = other.flat()
        if isinstance(other, InvalidType):
            return InvalidType()
        # Gaining a field turns the bare name into a node type.
        return NodeType(self.name, OrderedDict([(name, Field(False, False))]))

    def splice(self, other):
        if isinstance(other, UnknownType):
            return other
        if isinstance(other, (EmptyType, NamedType)):
            return self
        if isinstance(other, (StringType, TokenType)):
            # Absorbing text makes this a token (named text) type.
            return TokenType(self.name)
        if isinstance(other, NodeType):
            # Adopt the other node's fields under this name.
            return NodeType(self.name, other.fields)
        if isinstance(other, OrType):
            return merge_types(self.splice(t) for t in other)
        return InvalidType()

    def flat(self):
        return self

    def merge(self, other):
        if self == other:
            return self
        # Merging is symmetric; delegate to the richer type.
        return other.merge(self)
class TokenType(Type):
    """A named node type that carries captured text (a token)."""

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return '${}'.format(self.name)

    def __eq__(self, other):
        return self.__class__ is other.__class__ and self.name == other.name

    def __hash__(self):
        return hash((self.__class__, self.name))

    def append(self, other, name):
        # Tokens hold text, not named fields.
        return InvalidType()

    def splice(self, other):
        if isinstance(other, UnknownType):
            return other
        if isinstance(other, (EmptyType, NamedType, StringType, TokenType)):
            return self
        if isinstance(other, OrType):
            return merge_types(self.splice(t) for t in other)
        return InvalidType()

    def flat(self):
        # Flattening forgets the text and keeps the node identity.
        return NamedType(self.name)

    def merge(self, other):
        if self == other:
            return self
        if isinstance(other, NamedType):
            # A token subsumes the bare named form of itself.
            return self
        return InvalidType()
class Field(namedtuple("Field", "arr opt")):
    """Presence info for one node field: arr = repeated, opt = may be absent."""

    def append(self, other):
        # A second append always makes the field repeated; optionality is sticky.
        return Field(True, self.opt or other.opt)

    def __repr__(self):
        if self.arr:
            return "[]"
        if self.opt:
            return "?"
        return "_"
class NodeType(Type):
    """A named node type with an ordered mapping of field name -> Field info."""

    def __init__(self, name, fields):
        self.name = name
        self.fields = fields

    def __repr__(self):
        return '{}({})'.format(
            self.name,
            ', '.join(
                '{}={!r}'.format(name, field)
                for name, field in self.fields.items()
            )
        )

    def __eq__(self, other):
        return self.__class__ is other.__class__ and self.name == other.name \
            and self.fields == other.fields

    def __hash__(self):
        return hash(
            (self.__class__, self.name, tuple(sorted(self.fields.items())))
        )

    def append(self, other, name):
        if isinstance(other, UnknownType):
            return other
        other = other.flat()
        if isinstance(other, InvalidType):
            return other
        fields = OrderedDict(self.fields)
        if name in fields:
            # Appending to an existing field marks it as repeated.
            fields[name] = fields[name].append(Field(False, False))
        else:
            fields[name] = Field(False, False)
        return NodeType(self.name, fields)

    def splice(self, other):
        if isinstance(other, (EmptyType, NamedType)):
            return self
        if isinstance(other, NodeType):
            # Absorb the other node's fields; shared names become repeated.
            fields = OrderedDict(self.fields)
            for name, of in other.fields.items():
                if name in fields:
                    of = fields[name].append(of)
                fields[name] = of
            return NodeType(self.name, fields)
        if isinstance(other, OrType):
            return merge_types(self.splice(t) for t in other)
        if isinstance(other, UnknownType):
            return other
        return InvalidType()

    def flat(self):
        return NamedType(self.name)

    def merge(self, other):
        if self == other:
            return self
        if isinstance(other, NamedType):
            # Merging with the bare form makes every field optional.
            fields = OrderedDict()
            for name, field in self.fields.items():
                fields[name] = Field(field.arr, True)
            return NodeType(self.name, fields)
        if isinstance(other, NodeType):
            # Union of fields: a field missing on one side becomes optional.
            fields = OrderedDict()
            for name, sf in self.fields.items():
                of = other.fields.get(name, Field(False, True))
                fields[name] = Field(sf.arr or of.arr, sf.opt or of.opt)
            for name, of in other.fields.items():
                if name not in self.fields:
                    fields[name] = Field(of.arr, True)
            return NodeType(self.name, fields)
        return InvalidType()
class OrType(Type):
    """Union of several alternative types."""

    def __init__(self, items):
        self.items = tuple(items)

    def __repr__(self):
        parts = ' | '.join(repr(alt) for alt in self.items)
        return "({})".format(parts)

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        # Alternatives are compared as a set: order does not matter.
        return set(self.items) == set(other.items)

    def __hash__(self):
        return hash((self.__class__, self.items))

    def __iter__(self):
        return iter(self.items)

    def append(self, other, name):
        # Operations distribute over the alternatives and re-merge.
        return merge_types(alt.append(other, name) for alt in self.items)

    def splice(self, other):
        return merge_types(alt.splice(other) for alt in self.items)

    def flat(self):
        return merge_types(alt.flat() for alt in self.items)
def merge_types(types):
    """Combine an iterable of types into one (possibly an OrType).

    Invalid short-circuits the whole merge; Unknown is the identity and is
    dropped; iterating each type flattens nested OrTypes (via Type.__iter__).
    """
    types_list = []
    for tp in types:
        if isinstance(tp, InvalidType):
            return tp
        elif isinstance(tp, UnknownType):
            continue
        else:
            types_list.extend(tp)
    # Deduplicate by node name (or by value for the scalar kinds), merging
    # same-named alternatives pairwise; insertion order is preserved.
    merged = OrderedDict()
    for tp in types_list:
        if isinstance(tp, (EmptyType, StringType)):
            merged[tp] = tp
        else:
            if tp.name in merged:
                tp = merged[tp.name].merge(tp)
            merged[tp.name] = tp
    distinct_types = list(merged.values())
    if not distinct_types:
        return UnknownType()
    if len(distinct_types) == 1:
        return distinct_types[0]
    return OrType(distinct_types)
ethframe/descent | make_parser.py | from argparse import ArgumentParser
from descent.parser import parse_grammar
from descent.grammarcheck import check_grammar, get_invalid
from descent.typeinference import infer_types
from descent.codegen import gen_ast_module_src
from descent.macro import expand_macros
def generate(input):
    """Yield source lines of a self-contained parser module for ``input``.

    ``input`` is grammar source text. The emitted module defines the AST
    classes, the expanded grammar, and a compiled ``parser`` object.

    Raises:
        ValueError: if the grammar fails to parse or contains invalid rules.
    """
    parsed = parse_grammar(input)
    if parsed is None:
        raise ValueError("Invalid grammar")  # fixed typo: was "Invald grammar"
    grammar = expand_macros(parsed)
    invalid = get_invalid(check_grammar(grammar))
    if invalid:
        raise ValueError("Invalid rules: {}".format(", ".join(invalid)))
    types = infer_types(grammar)
    yield "from collections import OrderedDict\n"
    yield "from descent.ast import *"
    yield "from descent.combinators import compile_parser\n\n"
    yield gen_ast_module_src(types)
    yield ""
    yield ""
    yield "parsed_grammar = OrderedDict(["
    # Rule bodies round-trip through repr(), which the AST classes support.
    for name, body in grammar.items():
        yield "    ({!r}, {!r}),".format(name, body)
    yield "])\n\n"
    yield "parser = compile_parser(parsed_grammar, types_map)"
def main():
    """CLI entry point: read a grammar file and write the generated parser module."""
    cli = ArgumentParser()
    cli.add_argument("input", help="Input file")
    cli.add_argument("output", help="Output file")
    opts = cli.parse_args()
    with open(opts.input) as inf, open(opts.output, "w") as outf:
        for line in generate(inf.read()):
            outf.write(line + "\n")
main()
|
RasmusBC59/Qcodes | qcodes/data/data_array.py | <filename>qcodes/data/data_array.py<gh_stars>0
from typing import Dict, Any, Optional
import numpy as np
import collections
from qcodes.utils.helpers import DelegateAttributes, full_class, warn_units
import xarray as xr
class DataArray(DelegateAttributes):
"""
A container for one parameter in a measurement loop.
    If this is a measured parameter, this object doesn't contain
    the data of the setpoints it was measured at, but it references
    the DataArray objects of these parameters. Those objects only have
    the dimensionality at which they were set - ie the inner loop setpoint
    has the same dimensionality as the measured parameter, but the outer
    loop setpoint(s) have lower dimensionality
When it's first created, a DataArray has no dimensionality, you must call
.nest for each dimension.
If preset_data is provided it is used to initialize the data, and the array
can still be nested around it (making many copies of the data).
Otherwise it is an error to nest an array that already has data.
Once the array is initialized, a DataArray acts a lot like a numpy array,
because we delegate attributes through to the numpy array
Args:
parameter (Optional[Parameter]): The parameter whose values will
populate this array, if any. Will copy ``name``, ``full_name``,
``label``, ``unit``, and ``snapshot`` from here unless you
provide them explicitly.
name (Optional[str]): The short name of this array.
TODO: use full_name as name, and get rid of short name
full_name (Optional[str]): The complete name of this array. If the
array is based on a parameter linked to an instrument, this is
typically '<instrument_name>_<param_name>'
label (Optional[str]): A description of the values in this array to
use for axis and colorbar labels on plots.
snapshot (Optional[dict]): Metadata snapshot to save with this array.
array_id (Optional[str]): A name for this array that's unique within
its ``DataSet``. Typically the full_name, but when the ``DataSet``
is constructed we will append '_<i>' (``i`` is an integer starting
from 1) if necessary to differentiate arrays with the same id.
TODO: this only happens for arrays provided to the DataSet
constructor, not those added with add_array. Fix this!
Also, do we really need array_id *and* full_name (let alone name
but I've already said we should remove this)?
set_arrays (Optional[Tuple[DataArray]]): If this array is being
created with shape already, you can provide one setpoint array
per dimension. The first should have one dimension, the second
two dimensions, etc.
shape (Optional[Tuple[int]]): The shape (as in numpy) of the array.
Will be prepended with new dimensions by any calls to ``nest``.
action_indices (Optional[Tuple[int]]): If used within a ``Loop``,
these are the indices at each level of nesting within the
``Loop`` of the loop action that's populating this array.
TODO: this shouldn't be in DataArray at all, the loop should
handle converting this to array_id internally (maybe it
already does?)
unit (Optional[str]): The unit of the values stored in this array.
units (Optional[str]): DEPRECATED, redirects to ``unit``.
is_setpoint (bool): True if this is a setpoint array, False if it
is measured. Default False.
preset_data (Optional[Union[numpy.ndarray, Sequence]]): Contents of the
array, if already known (for example if this is a setpoint
array). ``shape`` will be inferred from this array instead of
from the ``shape`` argument.
"""
# attributes of self to include in the snapshot
SNAP_ATTRS = (
'array_id',
'name',
'shape',
'unit',
'label',
'action_indices',
'is_setpoint')
# attributes of the parameter (or keys in the incoming snapshot)
# to copy to DataArray attributes, if they aren't set some other way
COPY_ATTRS_FROM_INPUT = (
'name',
'label',
'unit')
# keys in the parameter snapshot to omit from our snapshot
SNAP_OMIT_KEYS = (
'ts',
'value',
'__class__',
'set_arrays',
'shape',
'array_id',
'action_indices')
    def __init__(self, parameter=None, name=None, full_name=None, label=None,
                 snapshot=None, array_id=None, set_arrays=(), shape=None,
                 action_indices=(), unit=None, units=None, is_setpoint=False,
                 preset_data=None):
        """See the class docstring for parameter descriptions."""
        self.name = name
        self.full_name = full_name or name
        self.label = label
        self.shape = shape
        if units is not None:
            # `units` is deprecated; it only fills `unit` when that is unset.
            warn_units('DataArray', self)
            if unit is None:
                unit = units
        self.unit = unit
        self.array_id = array_id
        self.is_setpoint = is_setpoint
        self.action_indices = action_indices
        self.set_arrays = set_arrays
        self._preset = False

        # store a reference up to the containing DataSet
        # this also lets us make sure a DataArray is only in one DataSet
        self._data_set = None

        self.last_saved_index = None
        self.modified_range = None

        self.ndarray = None
        if snapshot is None:
            snapshot = {}
        self._snapshot_input = {}

        if parameter is not None:
            param_full_name = getattr(parameter, 'full_name', None)
            if param_full_name and not full_name:
                self.full_name = parameter.full_name

            if hasattr(parameter, 'snapshot') and not snapshot:
                snapshot = parameter.snapshot()
            else:
                # TODO: why is this in an else clause?
                for attr in self.COPY_ATTRS_FROM_INPUT:
                    if (hasattr(parameter, attr) and
                            not getattr(self, attr, None)):
                        setattr(self, attr, getattr(parameter, attr))

        for key, value in snapshot.items():
            if key not in self.SNAP_OMIT_KEYS:
                self._snapshot_input[key] = value

                # Snapshot values only fill attributes still unset above.
                if (key in self.COPY_ATTRS_FROM_INPUT and
                        not getattr(self, key, None)):
                    setattr(self, key, value)

        if not self.label:
            self.label = self.name

        if preset_data is not None:
            self.init_data(preset_data)
        elif shape is None:
            # No shape and no data: start dimensionless; `nest` adds dims.
            self.shape = ()
    @property
    def data_set(self):
        """
        The DataSet this array belongs to.

        A DataArray can belong to at most one DataSet.
        TODO: make this a weakref
        """
        return self._data_set

    @data_set.setter
    def data_set(self, new_data_set):
        # Allow detaching (None) or re-setting the same DataSet, but never
        # silently moving an array between two different DataSets.
        if (self._data_set is not None and
                new_data_set is not None and
                self._data_set != new_data_set):
            raise RuntimeError('A DataArray can only be part of one DataSet')
        self._data_set = new_data_set
    def nest(self, size, action_index=None, set_array=None):
        """
        Nest this array inside a new outer loop.

        You cannot call ``nest`` after ``init_data`` unless this is a
        setpoint array.
        TODO: is this restriction really useful? And should we maintain
        a distinction between _preset and is_setpoint, or can we just use
        is_setpoint?

        Args:
            size (int): Length of the new loop.
            action_index (Optional[int]): Within the outer loop at this
                nesting level, which action does this array derive from?
            set_array (Optional[DataArray]): The setpoints of the new outer
                loop. If this DataArray *is* a setpoint array, you should
                omit both ``action_index`` and ``set_array``, and it will
                reference itself as the inner setpoint array.

        Returns:
            DataArray: self, in case you want to construct the array with
                chained method calls.
        """
        if self.ndarray is not None and not self._preset:
            raise RuntimeError('Only preset arrays can be nested after data '
                               'is initialized! {}'.format(self))

        if set_array is None:
            if self.set_arrays:
                raise TypeError('a setpoint array must be its own inner loop')
            # A setpoint array is its own innermost setpoint.
            set_array = self

        # Prepend the new outer dimension to shape/indices/setpoints.
        self.shape = (size, ) + self.shape

        if action_index is not None:
            self.action_indices = (action_index, ) + self.action_indices

        self.set_arrays = (set_array, ) + self.set_arrays

        if self._preset:
            inner_data = self.ndarray
            self.ndarray = np.ndarray(self.shape)
            # existing preset array copied to every index of the nested array.
            for i in range(size):
                self.ndarray[i] = inner_data

            # update modified_range so the entire array still looks modified
            self.modified_range = (0, self.ndarray.size - 1)

        self._set_index_bounds()

        return self
    def init_data(self, data=None):
        """
        Create the actual numpy array to hold data.

        The array will be sized based on either ``self.shape`` or
        data provided here.

        Idempotent: will do nothing if the array already exists.

        If data is provided, this array is marked as a preset
        meaning it can still be nested around this data.
        TODO: per above, perhaps remove this distinction entirely?

        Args:
            data (Optional[Union[numpy.ndarray, Sequence]]): If provided,
                we fill the array with this data. Otherwise the new
                array will be filled with NaN.

        Raises:
            ValueError: if ``self.shape`` does not match ``data.shape``
            ValueError: if the array was already initialized with a
                different shape than we're about to create
        """
        if data is not None:
            if not isinstance(data, np.ndarray):
                if isinstance(data, collections.abc.Iterator):
                    # faster than np.array(tuple(data)) (or via list)
                    # but requires us to assume float
                    data = np.fromiter(data, float)
                else:
                    data = np.array(data)

            if self.shape is None:
                self.shape = data.shape
            elif data.shape != self.shape:
                raise ValueError('preset data must be a sequence '
                                 'with shape matching the array shape',
                                 data.shape, self.shape)
            self.ndarray = data
            self._preset = True

            # mark the entire array as modified
            self.modified_range = (0, data.size - 1)

        elif self.ndarray is not None:
            if self.ndarray.shape != self.shape:
                raise ValueError('data has already been initialized, '
                                 'but its shape doesn\'t match self.shape')
            # Already initialized with the right shape: nothing to do.
            return
        else:
            self.ndarray = np.ndarray(self.shape)
            self.clear()
        self._set_index_bounds()
def _set_index_bounds(self):
self._min_indices = [0 for d in self.shape]
self._max_indices = [d - 1 for d in self.shape]
def clear(self):
"""Fill the (already existing) data array with nan."""
# only floats can hold nan values. I guess we could
# also raise an error in this case? But generally float is
# what people want anyway.
if self.ndarray.dtype != float:
self.ndarray = self.ndarray.astype(float)
self.ndarray.fill(float('nan'))
def __setitem__(self, loop_indices, value):
    """
    Set data values.

    Follows numpy syntax, allowing indices of lower dimensionality than
    the array, if value makes up the extra dimension(s)

    Also update the record of modifications to the array. If you don't
    want this overhead, you can access ``self.ndarray`` directly.
    """
    if isinstance(loop_indices, collections.abc.Iterable):
        min_indices = list(loop_indices)
        max_indices = list(loop_indices)
    else:
        min_indices = [loop_indices]
        max_indices = [loop_indices]

    for i, index in enumerate(min_indices):
        if isinstance(index, slice):
            # replace a slice with the first and last index it covers
            # along this dimension (honoring the step)
            start, stop, step = index.indices(self.shape[i])
            min_indices[i] = start
            max_indices[i] = start + (
                ((stop - start - 1)//step) * step)

    # flat extent of the write; missing trailing dimensions are padded
    # with the full-span bounds cached by _set_index_bounds
    min_li = self.flat_index(min_indices, self._min_indices)
    max_li = self.flat_index(max_indices, self._max_indices)
    self._update_modified_range(min_li, max_li)

    self.ndarray.__setitem__(loop_indices, value)
def __getitem__(self, loop_indices):
    # reads go straight to the underlying numpy array; only writes
    # need the modified-range bookkeeping done in __setitem__
    return self.ndarray[loop_indices]

# NOTE(review): presumably consumed by a delegated-attribute lookup
# mechanism in the base class, so unknown attributes fall through to
# the numpy array -- confirm in the base class
delegate_attr_objects = ['ndarray']

def __len__(self):
    """
    Array length.

    Must be explicitly delegated, because len() will look for this
    attribute to already exist.
    """
    return len(self.ndarray)
def flat_index(self, indices, index_fill=None):
    """
    Generate the raveled index for the given indices.

    This is the index you would have if the array is reshaped to 1D,
    looping over the indices from inner to outer.

    Args:
        indices (Sequence): indices of an element or slice of this array.

        index_fill (Optional[Sequence]): extra indices to use if
            ``indices`` has less dimensions than the array, ie it points
            to a slice rather than a single element. Use zeros to get the
            beginning of this slice, and [d - 1 for d in shape] to get the
            end of the slice.

    Returns:
        int: the resulting flat index.
    """
    padded = indices
    if len(padded) < len(self.shape):
        # pad missing trailing dimensions from index_fill
        padded = padded + index_fill[len(padded):]
    # zip(padded) wraps each index in a 1-tuple, so ravel_multi_index
    # returns a length-1 array; [0] unwraps it to a scalar
    flat = np.ravel_multi_index(tuple(zip(padded)), self.shape)
    return flat[0]
def _update_modified_range(self, low, high):
    """Grow (or create) ``modified_range`` so it covers [low, high]."""
    current = self.modified_range
    if current:
        self.modified_range = (min(current[0], low),
                               max(current[1], high))
    else:
        # nothing marked modified yet: start a fresh range
        self.modified_range = (low, high)
def mark_saved(self, last_saved_index):
    """
    Mark certain outstanding modifications as saved.

    Args:
        last_saved_index (int): The flat index of the last point
            saved. If ``modified_range`` extends beyond this, the
            data past ``last_saved_index`` will still be marked
            modified, otherwise ``modified_range`` is cleared
            entirely.
    """
    mr = self.modified_range
    if mr:
        low, high = mr
        if last_saved_index >= high:
            # everything that was modified has now been saved
            self.modified_range = None
        else:
            # keep only the unsaved tail of the modified range
            self.modified_range = (max(low, last_saved_index + 1), high)

    self.last_saved_index = last_saved_index
def clear_save(self):
    """
    Make previously saved parts of this array look unsaved (modified).

    This can be used to force overwrite or rewrite, like if we're
    moving or copying the ``DataSet``.
    """
    last = self.last_saved_index
    if last is not None:
        # everything up to the old save point becomes "modified" again
        self._update_modified_range(0, last)

    self.last_saved_index = None
def get_synced_index(self):
    """
    Get the last index which has been synced from the server.

    Will also initialize the array if this hasn't happened already.
    TODO: seems hacky to init_data here.

    Returns:
        int: the last flat index which has been synced from the server,
            or -1 if no data has been synced.
    """
    if not hasattr(self, 'synced_index'):
        # first call: make sure the array exists, then start from -1
        # (meaning "nothing synced yet")
        self.init_data()
        self.synced_index = -1

    return self.synced_index
def get_changes(self, synced_index):
    """
    Find changes since the last sync of this array.

    Args:
        synced_index (int): The last flat index which has already
            been synced.

    Returns:
        Union[dict, None]: None if there is no new data. If there is,
            returns a dict with keys:
                start (int): the flat index of the first returned value.
                stop (int): the flat index of the last returned value.
                vals (List[float]): the new values
    """
    # latest flat index that holds meaningful data: whichever is larger
    # of the save point and the end of the modified range
    latest = -1 if self.last_saved_index is None else self.last_saved_index
    if self.modified_range:
        latest = max(latest, self.modified_range[1])

    new_values = []
    shape = self.ndarray.shape
    for flat_i in range(synced_index + 1, latest + 1):
        new_values.append(self.ndarray[np.unravel_index(flat_i, shape)])

    if new_values:
        return {
            'start': synced_index + 1,
            'stop': latest,
            'vals': new_values
        }
    # implicit None when nothing changed
def apply_changes(self, start, stop, vals):
    """
    Insert new synced values into the array.

    To be be called in a ``PULL_FROM_SERVER`` ``DataSet`` using results
    returned by ``get_changes`` from the ``DataServer``.

    TODO: check that vals has the right length?

    Args:
        start (int): the flat index of the first new value.
        stop (int): the flat index of the last new value.
        vals (List[float]): the new values
    """
    shape = self.ndarray.shape
    for offset, val in enumerate(vals):
        # flat position start+offset, converted back to a multi-index
        self.ndarray[np.unravel_index(start + offset, shape)] = val
    self.synced_index = stop
def __repr__(self):
    """Return ``ClassName[shape]: array_id`` plus the array contents."""
    id_part = f' {self.array_id}' if self.array_id else ''
    shape_part = ','.join(str(d) for d in self.shape)
    return '{}[{}]:{}\n{}'.format(self.__class__.__name__,
                                  shape_part,
                                  id_part,
                                  repr(self.ndarray))
def snapshot(self, update=False):
    """JSON representation of this DataArray.

    ``update`` is accepted for API compatibility but is not used in
    this implementation.
    """
    snap = {'__class__': full_class(self)}

    # constructor inputs recorded at creation time, then the current
    # value of every attribute listed in SNAP_ATTRS
    snap.update(self._snapshot_input)

    for attr in self.SNAP_ATTRS:
        snap[attr] = getattr(self, attr)

    return snap
def fraction_complete(self):
    """
    Get the fraction of this array which has data in it.

    Or more specifically, the fraction of the latest point in the array
    where we have touched it.

    Returns:
        float: fraction of array which is complete, from 0.0 to 1.0
    """
    if self.ndarray is None:
        return 0.0

    # the furthest flat index that has been saved, modified, or synced
    candidates = [
        self.last_saved_index,
        self.modified_range[1] if self.modified_range is not None else None,
        getattr(self, 'synced_index', None),
    ]
    last_index = max([-1] + [c for c in candidates if c is not None])

    return (last_index + 1) / self.ndarray.size
@property
def units(self):
    # legacy spelling of ``unit``; warn_units presumably emits a
    # deprecation warning -- see its definition for the exact message
    warn_units('DataArray', self)
    return self.unit
def to_xarray(self) -> xr.DataArray:
    """ Return this DataArray as an xarray dataarray

    Returns:
        DataArray in xarray format
    """
    # convert via the module-level dictionary helper, then let xarray
    # rebuild the object from its dict representation
    xarray_dictionary = data_array_to_xarray_dictionary(self)
    xarray_dataarray = xr.DataArray.from_dict(xarray_dictionary)
    return xarray_dataarray
@classmethod
def from_xarray(cls, xarray_dataarray: xr.DataArray, array_id: Optional[str] = None) -> 'DataArray':
    """ Create a DataArray from an xarray DataArray

    Args:
        xarray_dataarray: the xarray DataArray to convert
        array_id: Array id for the new DataArray. If None, then use the first data variable from the argument

    Returns:
        Created DataArray
    """
    xarray_dict = xarray_dataarray.to_dict()
    if array_id is None:
        # NOTE(review): this takes the first *dimension* name, not the
        # first data variable as the docstring says -- confirm intent
        array_id = list(xarray_dict['dims'])[0]
    data_array = xarray_data_array_dictionary_to_data_array(array_id, xarray_dict, is_setpoint=False)
    return data_array
def data_array_to_xarray_dictionary(data_array: DataArray) -> Dict[str, Any]:
    """Convert DataArray to a dictionary in xarray format.

    Args:
        data_array: The DataArray to convert.

    Returns:
        dict: A dictionary containing the data in xarray format.
    """
    result: Dict[str, Any] = {
        "unit": data_array.unit,
        "name": data_array.name,
        "label": data_array.label,
    }
    result["long_name"] = data_array.name

    if data_array.is_setpoint:
        result["dims"] = (data_array.array_id,)
        result["depends_on"] = result["dims"]
        values = data_array.ndarray
        # flatten data, assumes setpoint is uniform as for a normal
        # gridded dataset: keep only the first slice of every outer axis
        while len(values.shape) > 1:
            values = values[0, ..., :]
        result["data"] = values
    else:
        if data_array.set_arrays:
            result["dims"] = tuple(a.array_id for a in data_array.set_arrays)
            result["depends_on"] = result["dims"]
        result["data"] = data_array.ndarray

    return result
def xarray_data_array_dictionary_to_data_array(
        array_id: str, array_dictionary: Dict[str, Any], is_setpoint: bool = False, preset_data=None):
    """Convert xarray dictionary to a DataArray

    This conversion is for bith the data array and the the internal
    xarray structure, e.g. the datavars and coords.

    Args:
        array_id: Create the new DataArray with this id
        array_dictionary: Data to convert
        is_setpoint: Passed to the DataArray constructor
        preset_data: If None use the data from the dictionary, otherwise
            use the specified data.

    Returns:
        The created DataArray.
    """
    if preset_data is None:
        preset_data = np.array(array_dictionary["data"])

    # fall back: name defaults to the id, full name to the name
    name = array_dictionary.get("name", array_id)
    full_name = array_dictionary.get("long_name", name)

    return DataArray(
        name=name,
        full_name=full_name,
        label=array_dictionary.get("label", ""),
        unit=array_dictionary.get("unit", None),
        is_setpoint=is_setpoint,
        shape=preset_data.shape,
        array_id=array_id,
        preset_data=preset_data,
    )
|
RasmusBC59/Qcodes | qcodes/tests/dataset/test__get_data_from_ds.py | import pytest
import numpy as np
from numpy.testing import assert_allclose
import hypothesis.strategies as hst
from hypothesis import HealthCheck, given, settings
import qcodes as qc
from qcodes.dataset.data_export import get_data_by_id, _get_data_from_ds
from qcodes.dataset.descriptions.param_spec import ParamSpecBase
from qcodes.dataset.descriptions.dependencies import InterDependencies_
from qcodes.dataset.measurements import Measurement
from qcodes.utils.deprecate import QCoDeSDeprecationWarning
def test_get_data_by_id_order(dataset):
    """
    Test that the added values of setpoints end up associated with the correct
    setpoint parameter, irrespective of the ordering of those setpoint
    parameters
    """
    indepA = ParamSpecBase('indep1', "numeric")
    indepB = ParamSpecBase('indep2', "numeric")
    depAB = ParamSpecBase('depAB', "numeric")
    depBA = ParamSpecBase('depBA', "numeric")

    # depAB depends on (A, B) while depBA depends on (B, A): same
    # setpoints declared in opposite order
    idps = InterDependencies_(
        dependencies={depAB: (indepA, indepB), depBA: (indepB, indepA)})

    dataset.set_interdependencies(idps)
    dataset.mark_started()

    dataset.add_results([{'depAB': 12,
                          'indep2': 2,
                          'indep1': 1}])
    dataset.add_results([{'depBA': 21,
                          'indep2': 2,
                          'indep1': 1}])
    dataset.mark_completed()

    # get_data_by_id is deprecated; check both it and its replacement
    with pytest.warns(QCoDeSDeprecationWarning):
        data1 = get_data_by_id(dataset.run_id)
    data2 = _get_data_from_ds(dataset)
    for data in (data1, data2):
        # each setpoint keeps its own value regardless of declaration order
        data_dict = {el['name']: el['data'] for el in data[0]}
        assert data_dict['indep1'] == 1
        assert data_dict['indep2'] == 2

        data_dict = {el['name']: el['data'] for el in data[1]}
        assert data_dict['indep1'] == 1
        assert data_dict['indep2'] == 2
@pytest.mark.parametrize("bg_writing", [True, False])
@pytest.mark.usefixtures("experiment")
def test_datasaver_multidimarrayparameter_as_array(
        SpectrumAnalyzer,
        bg_writing
):
    """
    Storing a multidimensional array parameter with paramtype='array'
    yields one parameter per axis plus the data itself, all raveled in
    C (row-major) order.
    """
    array_param = SpectrumAnalyzer.multidimspectrum
    meas = Measurement()
    meas.register_parameter(array_param, paramtype='array')
    # three setpoint axes plus the spectrum itself
    assert len(meas.parameters) == 4
    inserted_data = array_param.get()
    with meas.run(write_in_background=bg_writing) as datasaver:
        datasaver.add_result((array_param, inserted_data))

    # leading 1: presumably a single stored 'array' row holds the whole
    # 100x50x20 block -- the comparisons below flatten over all of it
    expected_shape = (1, 100, 50, 20)
    datadicts = _get_data_from_ds(datasaver.dataset)
    assert len(datadicts) == 1
    for datadict_list in datadicts:
        assert len(datadict_list) == 4
        for datadict in datadict_list:
            # flatten in place before comparing against raveled data
            datadict['data'].shape = (np.prod(expected_shape),)
            if datadict['name'] == "dummy_SA_Frequency0":
                # outermost axis: each value repeats over both inner axes
                temp_data = np.linspace(array_param.start,
                                        array_param.stop,
                                        array_param.npts[0])
                expected_data = np.repeat(temp_data,
                                          expected_shape[2] * expected_shape[3])
            if datadict['name'] == "dummy_SA_Frequency1":
                # middle axis: repeat over innermost, tile over outermost
                temp_data = np.linspace(array_param.start,
                                        array_param.stop,
                                        array_param.npts[1])
                expected_data = np.tile(np.repeat(temp_data, expected_shape[3]),
                                        expected_shape[1])
            if datadict['name'] == "dummy_SA_Frequency2":
                # innermost axis: the full ramp tiles over the outer axes
                temp_data = np.linspace(array_param.start,
                                        array_param.stop,
                                        array_param.npts[2])
                expected_data = np.tile(temp_data,
                                        expected_shape[1] * expected_shape[2])
            if datadict['name'] == "dummy_SA_multidimspectrum":
                expected_data = inserted_data.ravel()
            assert_allclose(datadict['data'], expected_data)
@pytest.mark.parametrize("bg_writing", [True, False])
@pytest.mark.usefixtures("experiment")
def test_datasaver_multidimarrayparameter_as_numeric(SpectrumAnalyzer,
                                                     bg_writing):
    """
    Test that storing a multidim Array parameter as numeric unravels the
    parameter as expected.
    """
    array_param = SpectrumAnalyzer.multidimspectrum
    meas = Measurement()
    meas.register_parameter(array_param, paramtype='numeric')
    expected_shape = array_param.shape
    dims = len(array_param.shape)
    # one registered setpoint parameter per dimension, plus the data
    assert len(meas.parameters) == dims + 1

    points_expected = np.prod(array_param.npts)
    inserted_data = array_param.get()
    with meas.run(write_in_background=bg_writing) as datasaver:
        datasaver.add_result((array_param, inserted_data))
    # with 'numeric' storage every array element becomes its own row
    assert datasaver.points_written == points_expected

    datadicts = _get_data_from_ds(datasaver.dataset)
    assert len(datadicts) == 1
    for datadict_list in datadicts:
        assert len(datadict_list) == 4
        for datadict in datadict_list:
            datadict['data'].shape = (np.prod(expected_shape),)
            # expected setpoint layouts follow C (row-major) raveling
            if datadict['name'] == "dummy_SA_Frequency0":
                # outermost axis: each value repeats over the inner axes
                temp_data = np.linspace(array_param.start,
                                        array_param.stop,
                                        array_param.npts[0])
                expected_data = np.repeat(temp_data,
                                          expected_shape[1] * expected_shape[2])
            if datadict['name'] == "dummy_SA_Frequency1":
                # middle axis: repeat over innermost, tile over outermost
                temp_data = np.linspace(array_param.start,
                                        array_param.stop,
                                        array_param.npts[1])
                expected_data = np.tile(np.repeat(temp_data, expected_shape[2]),
                                        expected_shape[0])
            if datadict['name'] == "dummy_SA_Frequency2":
                # innermost axis: the full ramp tiles over the outer axes
                temp_data = np.linspace(array_param.start,
                                        array_param.stop,
                                        array_param.npts[2])
                expected_data = np.tile(temp_data,
                                        expected_shape[0] * expected_shape[1])
            if datadict['name'] == "dummy_SA_multidimspectrum":
                expected_data = inserted_data.ravel()
            assert_allclose(datadict['data'], expected_data)
@settings(max_examples=5, deadline=None,
          suppress_health_check=(HealthCheck.function_scoped_fixture,))
@given(N=hst.integers(min_value=5, max_value=500))
@pytest.mark.parametrize("bg_writing", [True, False])
@pytest.mark.parametrize("storage_type", ['numeric', 'array'])
@pytest.mark.usefixtures("experiment")
def test_datasaver_array_parameters_channel(channel_array_instrument,
                                            DAC, N, storage_type,
                                            bg_writing):
    """
    Saving an array parameter N times inside a loop yields N*M points
    for every registered parameter, for both storage types.
    """
    array_param = channel_array_instrument.A.dummy_array_parameter

    meas = Measurement()

    meas.register_parameter(DAC.ch1)
    meas.register_parameter(array_param, setpoints=[DAC.ch1], paramtype=storage_type)

    # M points per individual array acquisition
    M = array_param.shape[0]

    with meas.run(write_in_background=bg_writing) as datasaver:
        for set_v in np.linspace(0, 0.01, N):
            datasaver.add_result((DAC.ch1, set_v),
                                 (array_param, array_param.get()))
    datadicts = _get_data_from_ds(datasaver.dataset)
    # one dependent parameter
    assert len(datadicts) == 1
    datadicts = datadicts[0]
    assert len(datadicts) == len(meas.parameters)
    for datadict in datadicts:
        assert datadict['data'].shape == (N * M,)
@settings(max_examples=5, deadline=None,
          suppress_health_check=(HealthCheck.function_scoped_fixture,))
@given(N=hst.integers(min_value=5, max_value=500))
@pytest.mark.usefixtures("experiment")
@pytest.mark.parametrize("bg_writing", [True, False])
def test_datasaver_array_parameters_array(channel_array_instrument, DAC, N,
                                          bg_writing):
    """
    Test that storing array parameters inside a loop works as expected
    """
    storage_type = "array"
    array_param = channel_array_instrument.A.dummy_array_parameter
    # name of the implicit setpoint parameter created for the array axis
    dependency_name = 'dummy_channel_inst_ChanA_array_setpoint_param_this_setpoint'

    # Now for a real measurement
    meas = Measurement()

    meas.register_parameter(DAC.ch1, paramtype='numeric')
    meas.register_parameter(array_param, setpoints=[DAC.ch1], paramtype=storage_type)

    # DAC.ch1, the implicit setpoint, and the array parameter itself
    assert len(meas.parameters) == 3

    M = array_param.shape[0]
    dac_datapoints = np.linspace(0, 0.01, N)
    with meas.run(write_in_background=bg_writing) as datasaver:
        for set_v in dac_datapoints:
            datasaver.add_result((DAC.ch1, set_v),
                                 (array_param, array_param.get()))
    datadicts = _get_data_from_ds(datasaver.dataset)
    # one dependent parameter
    assert len(datadicts) == 1
    datadicts = datadicts[0]
    assert len(datadicts) == len(meas.parameters)
    for datadict in datadicts:
        if datadict['name'] == 'dummy_dac_ch1':
            # each DAC value is repeated once per array element
            expected_data = np.repeat(dac_datapoints, M)
        if datadict['name'] == dependency_name:
            # the array axis (5..9, 5 points) repeats for every DAC value
            expected_data = np.tile(np.linspace(5, 9, 5), N)
        if datadict['name'] == 'dummy_channel_inst_ChanA_dummy_array_parameter':
            # the data itself is expected to be constant 2.0
            expected_data = np.empty(N * M)
            expected_data[:] = 2.
        assert_allclose(datadict['data'], expected_data)

        assert datadict['data'].shape == (N * M,)
@pytest.mark.parametrize("bg_writing", [True, False])
def test_datasaver_multidim_array(experiment, bg_writing):
    """
    Test that inserting multidim parameters as arrays works as expected
    """
    meas = Measurement(experiment)
    size1 = 10
    size2 = 15

    # maps parameter name -> slice index into the random data block
    data_mapping = {name: i for i, name in
                    zip(range(4), ['x1', 'x2', 'y1', 'y2'])}

    x1 = qc.ManualParameter('x1')
    x2 = qc.ManualParameter('x2')
    y1 = qc.ManualParameter('y1')
    y2 = qc.ManualParameter('y2')

    meas.register_parameter(x1, paramtype='array')
    meas.register_parameter(x2, paramtype='array')
    meas.register_parameter(y1, setpoints=[x1, x2], paramtype='array')
    meas.register_parameter(y2, setpoints=[x1, x2], paramtype='array')
    data = np.random.rand(4, size1, size2)
    expected = {'x1': data[0, :, :],
                'x2': data[1, :, :],
                'y1': data[2, :, :],
                'y2': data[3, :, :]}
    with meas.run(write_in_background=bg_writing) as datasaver:
        datasaver.add_result((str(x1), expected['x1']),
                             (str(x2), expected['x2']),
                             (str(y1), expected['y1']),
                             (str(y2), expected['y2']))
    datadicts = _get_data_from_ds(datasaver.dataset)
    # two dependent parameters (y1, y2), each reported with its two setpoints
    assert len(datadicts) == 2
    for datadict_list in datadicts:
        assert len(datadict_list) == 3
        for datadict in datadict_list:
            dataindex = data_mapping[datadict['name']]
            expected_data = data[dataindex, :, :].ravel()
            assert_allclose(datadict['data'], expected_data)

            assert datadict['data'].shape == (size1 * size2,)
@pytest.mark.parametrize("bg_writing", [True, False])
def test_datasaver_multidim_numeric(experiment, bg_writing):
    """
    Test that inserting multidim parameters as numeric works as expected
    """
    meas = Measurement(experiment)
    size1 = 10
    size2 = 15
    x1 = qc.ManualParameter('x1')
    x2 = qc.ManualParameter('x2')
    y1 = qc.ManualParameter('y1')
    y2 = qc.ManualParameter('y2')

    # maps parameter name -> slice index into the random data block
    data_mapping = {name: i for i, name in
                    zip(range(4), ['x1', 'x2', 'y1', 'y2'])}

    meas.register_parameter(x1, paramtype='numeric')
    meas.register_parameter(x2, paramtype='numeric')
    meas.register_parameter(y1, setpoints=[x1, x2], paramtype='numeric')
    meas.register_parameter(y2, setpoints=[x1, x2], paramtype='numeric')
    data = np.random.rand(4, size1, size2)
    with meas.run(write_in_background=bg_writing) as datasaver:
        datasaver.add_result((str(x1), data[0, :, :]),
                             (str(x2), data[1, :, :]),
                             (str(y1), data[2, :, :]),
                             (str(y2), data[3, :, :]))
    datadicts = _get_data_from_ds(datasaver.dataset)
    # two dependent parameters (y1, y2), each reported with its two setpoints
    assert len(datadicts) == 2
    for datadict_list in datadicts:
        assert len(datadict_list) == 3
        for datadict in datadict_list:
            dataindex = data_mapping[datadict['name']]
            expected_data = data[dataindex, :, :].ravel()
            assert_allclose(datadict['data'], expected_data)

            assert datadict['data'].shape == (size1 * size2,)
|
RasmusBC59/Qcodes | qcodes/dataset/subscriber.py | <filename>qcodes/dataset/subscriber.py
import functools
import logging
import time
from threading import Thread
from typing import Any, Callable, List, Mapping, Optional, TYPE_CHECKING
from queue import Empty, Queue
from qcodes.dataset.sqlite.connection import atomic_transaction
if TYPE_CHECKING:
from qcodes.dataset.data_set import DataSet
class _Subscriber(Thread):
    """
    Class to add a subscriber to a :class:`.DataSet`. The subscriber gets called every
    time an insert is made to the results_table.

    The _Subscriber is not meant to be instantiated directly, but rather used
    via the 'subscribe' method of the :class:`.DataSet`.

    NOTE: A subscriber should be added *after* all parameters have been added.

    NOTE: Special care shall be taken when using the *state* object: it is the
    user's responsibility to operate with it in a thread-safe way.
    """
    def __init__(self,
                 dataSet: 'DataSet',
                 id_: str,
                 callback: Callable[..., None],
                 state: Optional[Any] = None,
                 loop_sleep_time: int = 0,  # in milliseconds
                 min_queue_length: int = 1,
                 callback_kwargs: Optional[Mapping[str, Any]] = None
                 ) -> None:
        super().__init__()

        self._id = id_

        self.dataSet = dataSet
        self.table_name = dataSet.table_name
        # running count of results; incremented for every trigger firing
        self._data_set_len = len(dataSet)

        self.state = state

        # rows inserted since the last callback flush
        self.data_queue: "Queue[Any]" = Queue()
        self._queue_length: int = 0
        self._stop_signal: bool = False
        # convert milliseconds to seconds
        self._loop_sleep_time = loop_sleep_time / 1000
        self.min_queue_length = min_queue_length

        if callback_kwargs is None or len(callback_kwargs) == 0:
            self.callback = callback
        else:
            # bake the static kwargs in so the loop can call it uniformly
            self.callback = functools.partial(callback, **callback_kwargs)

        self.callback_id = f"callback{self._id}"
        self.trigger_id = f"sub{self._id}"

        conn = dataSet.conn

        # register a SQL function (variadic: -1) that enqueues each
        # inserted row, then install a trigger that invokes it on every
        # INSERT into the results table
        conn.create_function(self.callback_id, -1, self._cache_data_to_queue)

        parameters = dataSet.get_parameters()
        sql_param_list = ",".join([f"NEW.{p.name}" for p in parameters])
        sql_create_trigger_for_callback = f"""
CREATE TRIGGER {self.trigger_id}
AFTER INSERT ON '{self.table_name}'
BEGIN
SELECT {self.callback_id}({sql_param_list});
END;"""
        atomic_transaction(conn, sql_create_trigger_for_callback)

        self.log = logging.getLogger(f"_Subscriber {self._id}")

    def _cache_data_to_queue(self, *args: Any) -> None:
        # invoked by SQLite (via the trigger) for every inserted row
        self.data_queue.put(args)
        self._data_set_len += 1
        self._queue_length += 1

    def run(self) -> None:
        # Thread entry point
        self.log.debug("Starting subscriber")
        self._loop()

    @staticmethod
    def _exhaust_queue(queue: "Queue[Any]") -> List[Any]:
        """Drain everything currently in the queue without blocking."""
        result_list = []
        while True:
            try:
                result_list.append(queue.get(block=False))
            except Empty:
                break
        return result_list

    def _call_callback_on_queue_data(self) -> None:
        # flush all queued rows into a single callback invocation
        result_list = self._exhaust_queue(self.data_queue)
        self.callback(result_list, self._data_set_len, self.state)

    def _loop(self) -> None:
        """Poll the queue, flushing to the callback whenever at least
        ``min_queue_length`` rows have accumulated, until stopped or the
        dataset completes."""
        while True:
            if self._stop_signal:
                self._clean_up()
                break

            if self._queue_length >= self.min_queue_length:
                self._call_callback_on_queue_data()
                self._queue_length = 0

            time.sleep(self._loop_sleep_time)

            if self.dataSet.completed:
                # final flush so no rows are lost at completion
                self._call_callback_on_queue_data()
                break

    def done_callback(self) -> None:
        # explicit final flush, callable from outside the loop
        self._call_callback_on_queue_data()

    def schedule_stop(self) -> None:
        # request a stop; the loop notices the flag on its next iteration
        if not self._stop_signal:
            self.log.debug("Scheduling stop")
            self._stop_signal = True

    def _clean_up(self) -> None:
        # NOTE(review): the SQL trigger created in __init__ is not
        # dropped here -- confirm whether cleanup happens elsewhere
        self.log.debug("Stopped subscriber")
|
RasmusBC59/Qcodes | qcodes/dataset/json_exporter.py | <reponame>RasmusBC59/Qcodes
from typing import Any, Dict
# Skeleton payloads for the JSON exporter callbacks below: a 1D trace
# ('linear') and a 2D map ('heatmap'). The exporters fill in the 'data'
# lists; names/units are left for the caller to populate.
json_template_linear = {"type": 'linear',
                        'x': {'data': [], 'name': "", 'full_name': '', 'is_setpoint': True, 'unit': ''},
                        'y': {'data': [], 'name': "", 'full_name': '', 'is_setpoint': False, 'unit': ''}}

json_template_heatmap = {"type": 'heatmap',
                         'x': {'data': [], 'name': "", 'full_name': '', 'is_setpoint': True, 'unit': ''},
                         'y': {'data': [], 'name': "", 'full_name': '', 'is_setpoint': True, 'unit': ''},
                         'z': {'data': [], 'name': "", 'full_name': '', 'is_setpoint': False, 'unit': ''}}
def export_data_as_json_linear(
        data: Any, length: int, state: Dict[str, Any], location: str) -> None:
    """Subscriber callback: append new (x, y) rows to the JSON template
    held in ``state['json']`` and rewrite the file at ``location``.

    Args:
        data: sequence of (x, y) result rows received since the last call.
        length: total result count (unused here).
        state: mutable subscriber state holding the 'json' template.
        location: path of the JSON file to (re)write.
    """
    # imports are local so the callback is self-contained
    import json
    import numpy as np

    if len(data) > 0:
        rows = np.array(data)
        state['json']['x']['data'] += rows[:, 0].tolist()
        state['json']['y']['data'] += rows[:, 1].tolist()

    with open(location, mode='w') as out:
        json.dump(state['json'], out)
def export_data_as_json_heatmap(
        data: Any, length: int, state: Dict[str, Any], location: str) -> None:
    """Subscriber callback: accumulate new (x, y, z) rows into the
    pre-sized arrays in ``state['data']`` and rewrite ``location`` with
    the current heatmap as JSON.

    Args:
        data: sequence of (x, y, z) result rows received since the last
            call.
        length: end index (exclusive) for the newly received rows.
        state: mutable subscriber state; uses 'data' (pre-sized 'x'/'y'/
            'z' arrays, 'xlen', 'ylen', and a 'location' write cursor)
            and 'json' (template being filled in).
        location: path of the JSON file to (re)write.
    """
    import numpy as np
    import json
    if len(data) > 0:
        npdata = np.array(data)

        # append the new rows at the current cursor, then advance it
        array_start = state['data']['location']
        array_end = length
        state['data']['x'][array_start:array_end] = npdata[:, 0]
        state['data']['y'][array_start:array_end] = npdata[:, 1]
        state['data']['z'][array_start:array_end] = npdata[:, 2]
        state['data']['location'] = array_end

        # x axis: every ylen-th sample (one per row); y axis: the first
        # row; z: the full grid reshaped to (xlen, ylen)
        state['json']['x']['data'] = state['data']['x'][
            0:-1:state['data']['ylen']].tolist()
        state['json']['y']['data'] = state['data']['y'][
            0:state['data']['ylen']].tolist()
        state['json']['z']['data'] = state['data']['z'].reshape(
            state['data']['xlen'], state['data']['ylen']).tolist()

    with open(location, mode='w') as f:
        json.dump(state['json'], f)
|
RasmusBC59/Qcodes | qcodes/instrument_drivers/Keysight/keysight_34934a.py | <reponame>RasmusBC59/Qcodes
import logging
import re
from qcodes import VisaInstrument, InstrumentChannel, validators
from typing import Union, List, Tuple, Optional, Callable
from .keysight_34980a_submodules import KeysightSwitchMatrixSubModule
class Keysight34934A(KeysightSwitchMatrixSubModule):
    """
    Create an instance for module 34934A.

    Args:
        parent: the system which the module is installed on
        name: user defined name for the module
        slot: the slot the module is installed
    """
    def __init__(
            self,
            parent: Union[VisaInstrument, InstrumentChannel],
            name: str,
            slot: int
    ) -> None:
        super().__init__(parent, name, slot)

        self.add_parameter(name='protection_mode',
                           get_cmd=self._get_relay_protection_mode,
                           set_cmd=self._set_relay_protection_mode,
                           vals=validators.Enum('AUTO100',
                                                'AUTO0',
                                                'FIX',
                                                'ISO'),
                           docstring='get and set the relay protection mode.'
                                     'The fastest switching speeds for relays'
                                     'in a given signal path are achieved using'
                                     'the FIXed or ISOlated modes, followed'
                                     'by the AUTO100 and AUTO0 modes.'
                                     'There may be a maximum of 200 Ohm of'
                                     'resistance, which can only be bypassed'
                                     'by "AUTO0" mode. See manual and'
                                     'programmer''s reference for detail.')

        # a terminal type of 'NONE' means no configuration module is
        # connected (or the safety interlock jumper is removed); the
        # module must then refuse to make any connection
        layout = self.ask(f'SYSTEM:MODule:TERMinal:TYPE? {self.slot}')
        self._is_locked = (layout == 'NONE')
        if self._is_locked:
            logging.warning(f'For slot {slot}, no configuration module'
                            f'connected, or safety interlock jumper removed. '
                            "Making any connection is not allowed")
            # the terminal type does not reveal the matrix layout in
            # this case, so read it from the card-type catalog instead
            config = self.ask(f'SYST:CTYP? {slot}').strip('"').split(',')[1]
            layout = config.split('-')[1]
        # e.g. a '4x32' layout yields row=4, column=32
        self.row, self.column = [
            int(num) for num in re.findall(r'\d+', layout)
        ]

    def write(self, cmd: str) -> None:
        """
        Forward a command to the parent instrument.

        When the module is safety interlocked, users can not make any
        connections: the command is dropped (with a warning) instead of
        being forwarded.
        """
        if self._is_locked:
            logging.warning("Warning: no configuration module connected, "
                            "or safety interlock enabled. "
                            "Making any connection is not allowed")
            # BUG FIX: previously the command was still forwarded after
            # the warning; an interlocked module must not write at all
            return
        return self.parent.write(cmd)

    def validate_value(self, row: int, column: int) -> None:
        """
        to check if the row and column number is within the range of the
        module layout.

        Args:
            row: row value
            column: column value

        Raises:
            ValueError: if the requested crosspoint lies outside the
                row/column extent reported by the module
        """
        if (row > self.row) or (column > self.column):
            raise ValueError('row/column value out of range')

    def _get_relay_protection_mode(self) -> str:
        # getter backing the 'protection_mode' parameter
        return self.ask(f'SYSTem:MODule:ROW:PROTection? {self.slot}')

    def _set_relay_protection_mode(self, mode: str) -> None:
        # setter backing the 'protection_mode' parameter; mode is
        # validated by the parameter's Enum validator
        self.write(f'SYSTem:MODule:ROW:PROTection {self.slot}, {mode}')

    def to_channel_list(
            self,
            paths: List[Tuple[int, int]],
            wiring_config: Optional[str] = ''
    ) -> str:
        """
        convert the (row, column) pair to a 4-digit channel number 'sxxx', where
        s is the slot number, xxx is generated from the numbering function.

        Args:
            paths: list of channels to connect [(r1, c1), (r2, c2), (r3, c3)]
            wiring_config: for 1-wire matrices, values are 'MH', 'ML';
                           for 2-wire matrices, values are 'M1H', 'M2H',
                           'M1L', 'M2L'

        Returns:
            in the format of '(@sxxx, sxxx, sxxx, sxxx)', where sxxx is a
            4-digit channel number
        """
        numbering_function = self.get_numbering_function(
            self.row,
            self.column,
            wiring_config
        )

        channels = []
        for row, column in paths:
            channel = f'{self.slot}{numbering_function(row, column)}'
            channels.append(channel)
        channel_list = f"(@{','.join(channels)})"
        return channel_list

    @staticmethod
    def get_numbering_function(
            rows: int,
            columns: int,
            wiring_config: Optional[str] = ''
    ) -> Callable[[int, int], str]:
        """
        to select the correct numbering function based on the matrix layout.
        On P168 of the user's guide for Agilent 34934A High Density Matrix
        Module:
        http://literature.cdn.keysight.com/litweb/pdf/34980-90034.pdf
        there are eleven equations. This function here simplifies them to one.

        Args:
            rows: the total row number of the matrix module
            columns: the total column number of the matrix module
            wiring_config: wiring configuration for 1 or 2 wired matrices

        Returns:
            The numbering function to convert row and column in to a 3-digit
            number

        Raises:
            ValueError: for an unsupported layout, or a wiring config
                not valid for that layout
        """
        layout = f'{rows}x{columns}'
        # valid wiring configs per physical layout; '' means none needed
        available_layouts = {
            "4x32": ["M1H", "M2H", "M1L", "M2L"],
            "4x64": ["MH", "ML"],
            "4x128": [''],
            "8x32": ["MH", "ML"],
            "8x64": [''],
            "16x32": ['']
        }

        if layout not in available_layouts:
            raise ValueError(f"Unsupported layout: {layout}")

        if wiring_config not in available_layouts[layout]:
            raise ValueError(
                f"Invalid wiring config '{wiring_config}' for layout {layout}"
            )

        # each wiring config selects a block of `columns` channels
        offsets = {
            "M1H": 0,
            "M2H": 1,
            "M1L": 2,
            "M2L": 3,
            "MH": 0,
            "ML": 1
        }

        offset = 0
        if wiring_config != '':
            offset = offsets[wiring_config] * columns

        # single formula replacing the manual's eleven per-layout equations
        channels_per_row = 800 / rows
        offset += 100 - int(channels_per_row)

        def numbering_function(row: int, col: int) -> str:
            return str(int(channels_per_row * row + col + offset))

        return numbering_function
|
crablab/bme280_logging | server.py | <gh_stars>0
import requests, json
import time
from influxdb import InfluxDBClient
from datetime import datetime

# InfluxDB instance receiving the readings; the 'weather' database must exist
client = InfluxDBClient('192.168.1.67', 8086, '', '', 'weather')

SENSOR_URL = "http://192.168.1.91"
POLL_PERIOD_SECONDS = 60


def make_point(measurement, value, timestamp):
    """Build a single InfluxDB point dict for one measurement."""
    return {
        "measurement": measurement,
        "tags": {},
        "time": timestamp,
        "fields": {"value": value},
    }


# Poll the BME280 HTTP endpoint forever, forwarding each reading to InfluxDB.
while True:
    try:
        r = requests.get(SENSOR_URL)
        data = r.json()
        print(json.dumps(data))

        # One timestamp for all three series so the readings from a single
        # poll stay aligned (previously each series got its own, slightly
        # different, timestamp).
        now = datetime.utcfromtimestamp(time.time())
        points = [make_point(name, data[name], now)
                  for name in ("temp", "pres", "hum")]
        # one batched write instead of three round trips
        client.write_points(points)
    except Exception as exc:
        # a transient network/sensor failure previously killed the logger;
        # report it and keep polling instead
        print("poll failed:", exc)
    time.sleep(POLL_PERIOD_SECONDS)
|
pvnieo/Learn-Noughts-and-Crosses | train.py | # stdlib
import random
import argparse
import pickle
from collections import defaultdict
from itertools import cycle
# 3p
import numpy as np
class TicTacToeTraining:
def __init__(self, args):
# RL params
self.Q = defaultdict(lambda: [0 for _ in range(9)]) # Q function
self.R = {"1": 1, "2": -1, "0": 0.5} # reward
self.epsilon = args.epsilon
self.eps_step = (self.epsilon * 1.2) / args.niter
self.alpha = args.alpha
self.gamma = args.gamma
self.niter = args.niter
# function playing for players
self.play_func = {"1": self.play_rl, "2": self.play_ai}
# set properties of game
self.count = {"1": 0, "2": 0, "0": 0} # Counter of score of the game
self.turns = cycle(["1", "2"])
self.level = 1
# winning combinations
self.wins = [{0, 1, 2}, {3, 4, 5}, {6, 7, 8}, {0, 3, 6}, {1, 4, 7}, {2, 5, 8}, {0, 4, 8}, {2, 4, 6}]
def train(self):
for i in range(self.niter):
self.epsilon -= self.eps_step
if i < (self.niter // 3):
self.level = 1
elif i < (2 * self.niter // 3):
self.level = 2
else:
self.level = 9
self.new_game()
with open(f"ql_{self.niter}_{self.alpha}.pkl", "wb") as f:
pickle.dump(dict(self.Q), f)
def new_game(self):
self.state = ['.' for _ in range(9)]
self.transitions = []
self.possible_movs = list(range(9))
self.movs = {"1": set(), "2": set()} # list of movs of each player
winner = "0"
while True:
turn_of = next(self.turns)
winner = self.play_func[turn_of](turn_of)
# q iteration at the end of episode
if winner != "0" or not self.possible_movs:
s, a, *_ = self.transitions.pop(-1)
self.Q[s][a] *= 1 - self.alpha
self.Q[s][a] += self.alpha * self.R[winner]
for s, a, sp in self.transitions[::-1]:
self.Q[s][a] *= 1 - self.alpha
self.Q[s][a] += self.alpha * self.gamma * max(self.Q[sp])
self.count[winner] += 1
break
print(f'Player 1: {self.count["1"]} | Player 2: {self.count["2"]} | Tie: {self.count["0"]}\r', end='')
def play_rl(self, turn_of):
state_hash = "".join(self.state)
for i in set(range(9)) - set(self.possible_movs): # -inf for illegal movs
self.Q[state_hash][i] = - float("inf")
# epsilon-greedy policy
if random.random() < self.epsilon:
case_n = random.choice(self.possible_movs)
else:
case_n = np.argmax(self.Q[state_hash])
self.state[case_n] = turn_of
self.s, self.a = state_hash, case_n
self.transitions.append([state_hash, case_n])
return self.do_step(turn_of, case_n)
def play_ai(self, turn_of):
case_n = random.choice(self.possible_movs)
self.state[case_n] = turn_of
if self.transitions:
self.transitions[-1].append("".join(self.state))
return self.do_step(turn_of, case_n)
    def do_step(self, turn_of, case_n):
        """Commit move `case_n` for player `turn_of`.

        Returns the player's id if the move wins, otherwise "0".
        """
        self.possible_movs.remove(case_n)
        self.movs[turn_of].add(case_n)
        is_win = self.is_win(self.movs[turn_of])
        if is_win:
            return turn_of
        return "0"
    def is_win(self, movs):
        """Return the winning combination (a set) contained in `movs`, else False."""
        for win in self.wins:
            if len(win.intersection(movs)) == 3:
                return win
        return False
def main(args):
    """Build the trainer from parsed CLI arguments and run training."""
    game = TicTacToeTraining(args)
    game.train()
    print()  # newline after the carriage-return progress line
if __name__ == "__main__":
    # CLI entry point: hyper-parameters for the tabular Q-learning trainer.
    parser = argparse.ArgumentParser(
        description="Training an RL agent to play Tic Tac Toe using Tabular Q Learning"
    )
    parser.add_argument("-ni", "--niter", type=int, default=10000,
                        help="Number of training episodes")
    parser.add_argument("-e", "--epsilon", type=float, default=0.7,
                        help="Exploration rate. Will be reduced to 0 as training progresses.")
    parser.add_argument("-a", "--alpha", type=float, default=0.6,
                        help="Learning rate of Q learning algorithm")
    parser.add_argument("-g", "--gamma", type=float, default=0.95,
                        help="Discount factor")
    args = parser.parse_args()
    main(args)
|
pvnieo/Learn-Noughts-and-Crosses | play_tic_tac_toe.py | <filename>play_tic_tac_toe.py
# stdlib
import random
import time
import pickle
import argparse
from argparse import RawTextHelpFormatter
from collections import defaultdict
from itertools import cycle
# 3p
import pygame
import numpy as np
class Board:
    """Pygame rendering of a square 3x3 tic-tac-toe board of side `w` pixels."""

    def __init__(self, w=900):
        self.w = w        # board side in pixels
        self.c = w // 3   # cell side
        self.c2 = w // 6  # half a cell (offset to a cell centre)
        self.draw_board()

    def draw_board(self):
        """Open the window and draw the empty 3x3 grid."""
        self.screen = pygame.display.set_mode((self.w, self.w))
        self.screen.fill((0, 0, 125))
        pygame.display.set_caption("Tic Tac Toe")
        pygame.draw.line(self.screen, (255, 255, 255), (self.c, 0), (self.c, self.w))
        pygame.draw.line(self.screen, (255, 255, 255), (2*self.c, 0), (2*self.c, self.w))
        pygame.draw.line(self.screen, (255, 255, 255), (0, self.c), (self.w, self.c))
        pygame.draw.line(self.screen, (255, 255, 255), (0, 2 * self.c), (self.w, 2 * self.c))
        pygame.display.flip()

    def draw_x(self, case_n):
        """Draw an X spanning cell `case_n` (0..8, row-major)."""
        x, y = self.pos_to_coord(case_n)
        # Re-quantise to the cell's top-left corner.
        x, y = x // self.c * self.c, y // self.c * self.c
        pygame.draw.line(self.screen, (255, 255, 255), (x, y), (x + self.c, y + self.c))
        pygame.draw.line(self.screen, (255, 255, 255), (x, y + self.c), (x + self.c, y))
        pygame.display.flip()

    def draw_o(self, case_n):
        """Draw an O centred in cell `case_n` (0..8, row-major)."""
        x, y = self.pos_to_coord(case_n)
        pos = (x // self.c * self.c + self.c // 2, y // self.c * self.c + self.c // 2)
        pygame.draw.circle(self.screen, (255, 255, 255), pos, self.c // 2, 1)
        pygame.display.flip()

    def draw_win_line(self, pos1, pos2):
        """Draw the red winning line between cell centres `pos1` and `pos2`."""
        pygame.draw.line(self.screen, (255, 0, 0), (self.c * (pos1 % 3) + self.c2, self.c * (pos1 // 3) + self.c2),
                         (self.c * (pos2 % 3) + self.c2, self.c * (pos2 // 3) + self.c2), 16)
        pygame.display.flip()

    def coord_to_pos(self, x, y):
        """Map pixel coordinates to a cell index 0..8 (row-major)."""
        return (x // self.c) + 3 * (y // self.c)

    def pos_to_coord(self, pos):
        """Map a cell index to pixel coordinates.

        NOTE(review): uses a 1.2*cell stride instead of self.c; callers
        (draw_x/draw_o) re-quantise with // self.c, which lands in the right
        cell for indices 0..8 — confirm the 1.2 factor is intentional.
        """
        return (int(self.c * 1.2) * (pos % 3), int(self.c * 1.2) * (pos // 3))
class TicTacToe:
    """Pygame tic-tac-toe between any mix of human, minmax, Q-learning and random players."""

    def __init__(self, args):
        # set game argument
        self.w = args.width
        self.player1 = args.player1  # player 1 plays with "X"
        self.player2 = args.player2  # player 2 plays with "O"
        levels = {'easy': 1, 'medium': 2, 'hard': 9}
        self.level = levels[args.level]  # minmax search depth
        self.waiting_time = args.time
        # set properties
        self.count = {"1": 0, "2": 0, "0": 0}  # Counter of score of the game
        self.turns = cycle(["1", "2"])
        # function playing for players
        players = {"human": self.play_human, "minmax": self.play_minmax, "ql": self.play_ql, "random": self.play_random}
        self.play_func = {"1": players[self.player1], "2": players[self.player2]}
        # winning combinations
        self.wins = [{0, 1, 2}, {3, 4, 5}, {6, 7, 8}, {0, 3, 6}, {1, 4, 7}, {2, 5, 8}, {0, 4, 8}, {2, 4, 6}]
        self.minmax_dict = {}  # store results of minmax algorithms to use later (improve speed)
        # load Q learning model
        with open("ql.pkl", "rb") as f:
            self.Q = pickle.load(f)

    def new_game(self):
        """Run one complete game on a fresh board and update the scoreboard."""
        self.board = Board(self.w)
        self.state = ['.' for _ in range(9)]
        self.possible_movs = list(range(9))
        self.movs = {"1": set(), "2": set()}  # list of movs of each player
        winner = 0
        while True:
            turn_of = next(self.turns)
            winner = self.play_func[turn_of](turn_of)
            if winner != "0" or not self.possible_movs:
                self.count[winner] += 1
                time.sleep(self.waiting_time)
                break
        print(f'Player 1: {self.count["1"]} | Player 2: {self.count["2"]} | Tie: {self.count["0"]}\r', end='')

    def play_human(self, turn_of):
        """Block until the human clicks a free cell, then commit that move."""
        while True:
            for event in pygame.event.get():
                if event.type == pygame.MOUSEBUTTONUP:
                    if self.board.coord_to_pos(*event.pos) not in self.possible_movs:
                        continue  # ignore clicks on occupied cells
                    case_n = self.board.coord_to_pos(*event.pos)
                    return self.do_step(turn_of, case_n)

    def play_minmax(self, turn_of):
        """Choose a move with depth-limited minimax (depth = self.level)."""
        time.sleep(0.1)
        turn_op = "1" if turn_of == "2" else "2"
        case_n = self.minmax(self.movs[turn_of], self.movs[turn_op], depth=self.level, max_step=True)[1]
        return self.do_step(turn_of, case_n)

    def play_ql(self, turn_of):
        """Choose a move from the loaded Q table; fall back to random on unseen states."""
        time.sleep(0.1)
        state_hash = "".join(self.state)
        if turn_of == "2":
            # Swap symbols so the table (trained from player 1's view) applies.
            state_hash = state_hash.replace("1", "?").replace("2", "1").replace("?", "2")
        if state_hash in self.Q:
            case_n = np.argmax(self.Q[state_hash])
        else:
            case_n = random.choice(self.possible_movs)
        return self.do_step(turn_of, case_n)

    def play_random(self, turn_of):
        """Choose a uniformly random free cell."""
        time.sleep(0.1)
        case_n = random.choice(self.possible_movs)
        return self.do_step(turn_of, case_n)

    def do_step(self, turn_of, case_n):
        """Commit move `case_n` for `turn_of`; return the winner id or "0"."""
        self.state[case_n] = turn_of
        self.possible_movs.remove(case_n)
        self.draw_case(turn_of, case_n)
        self.movs[turn_of].add(case_n)
        is_win = self.is_win(self.movs[turn_of])
        if is_win:
            self.board.draw_win_line(min(is_win), max(is_win))
            return turn_of
        return "0"

    def draw_case(self, to_draw, case_n):
        """Render the player's symbol: "1" draws an X, "2" an O."""
        self.board.draw_x(case_n) if to_draw == "1" else self.board.draw_o(case_n)

    def is_win(self, movs):
        """Return the winning combination (a set) contained in `movs`, else False."""
        for win in self.wins:
            if len(win.intersection(movs)) == 3:
                return win
        return False

    def minmax(self, my_moves, op_moves, depth=1, max_step=True):
        """Depth-limited minimax over move sets; returns (score, best_move).

        Scores are +1 win / -1 loss / 0 tie from the root player's point of
        view; `sign` undoes the perspective flip applied at each recursion
        level. Results are memoised in self.minmax_dict, keyed on sorted
        move tuples, depth and sign. best_move is -1 at terminal nodes.
        """
        sign = 1 if max_step else -1
        if (tuple(sorted(my_moves)), tuple(sorted(op_moves)), depth, sign) in self.minmax_dict:
            return self.minmax_dict[(tuple(sorted(my_moves)), tuple(sorted(op_moves)), depth, sign)]
        # Depth exhausted or board full: score the static position.
        if depth == 0 or (len(my_moves) + len(op_moves) == 9):
            if self.is_win(my_moves):
                M = 1 * sign
            else:
                M = -1 * sign if self.is_win(op_moves) else 0
            self.minmax_dict[(tuple(sorted(my_moves)), tuple(sorted(op_moves)), depth, sign)] = (M, -1)
            return (M, -1)
        if self.is_win(my_moves):
            self.minmax_dict[(tuple(sorted(my_moves)), tuple(sorted(op_moves)), depth, sign)] = (1 * sign, -1)
            return (1 * sign, -1)
        if self.is_win(op_moves):
            self.minmax_dict[(tuple(sorted(my_moves)), tuple(sorted(op_moves)), depth, sign)] = (-1 * sign, -1)
            return (-1 * sign, -1)
        # Group candidate moves by score; break ties randomly.
        d = defaultdict(list)
        for i in set(range(9)) - my_moves.union(op_moves):
            my_moves_copy = my_moves.copy()
            my_moves_copy.add(i)
            m = self.minmax(op_moves, my_moves_copy, depth - 1, not max_step)[0] * sign
            d[m].append(i)
        M, best_move = max(d.keys()), random.choice(d[max(d.keys())])
        self.minmax_dict[(tuple(sorted(my_moves)), tuple(sorted(op_moves)), depth, sign)] = (M * sign, best_move)
        return (M * sign, best_move)
def main(args):
    """Create the game from CLI args and play `total_games` games back to back."""
    game = TicTacToe(args)
    for _ in range(args.total_games):
        game.new_game()
    print()  # newline after the carriage-return progress line
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Implementation of a tic tac toe game with levels and multiple AIs.",
formatter_class=RawTextHelpFormatter
)
parser.add_argument("-l", "--level", choices=["easy", "medium", "hard"], default="medium",
help="""Level of the game, used only if minmax algorithm is selected.
Concretely, this represents the depth of the evaluated minmax tree:
* easy: depth = 1
* medium: depth = 2
* hard: depth = 9""")
parser.add_argument("-p1", "--player1", choices=["human", "minmax", "ql", "random"], default="human",
help="""Controler of player 1, always plays with 'X'.
Player can be either:
* human: a human player gives the instruction
* minmax: a minmax with a depth of tree = level
* ql: a Q learning agent
* random: a random agent""")
parser.add_argument("-p2", "--player2", choices=["human", "minmax", "ql", "random"], default="minmax",
help="""Controler of player 2, always plays with 'O'.
Player can be either:
* human: a human player gives the instruction
* minmax: a minmax with a depth of tree = level
* ql: a Q learning agent
* random: a random agent""")
parser.add_argument("-w", "--width", type=int, default=900,
help="Width and height of the board")
parser.add_argument("-t", "--time", type=int, default=2,
help="Waiting time between two consecutive games in secondes")
parser.add_argument("-tg", "--total_games", type=int, default=10,
help="Maximum number of games that will be played")
args = parser.parse_args()
main(args)
|
leakyH/mia | examples/protect_code/cifar10_lager_shadow.py | """
Example membership inference attack against a deep net classifier on the CIFAR10 dataset
"""
import numpy as np
from absl import app
from absl import flags
import tensorflow as tf
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split
from mia.estimators import ShadowModelBundle, AttackModelBundle, prepare_attack_data
NUM_CLASSES = 10
WIDTH = 32
HEIGHT = 32
CHANNELS = 3
SHADOW_DATASET_SIZE = 4500
#largest:8100
ATTACK_TEST_DATASET_SIZE = 4500
BATCH_SIZE=32
FLAGS = flags.FLAGS
flags.DEFINE_integer(
    "target_epochs", 10, "Number of epochs to train target and shadow models."
)
flags.DEFINE_integer("attack_epochs", 10, "Number of epochs to train attack models.")
# Fixed copy-pasted help text: this flag is the shadow-model count, not epochs.
flags.DEFINE_integer("num_shadows", 3, "Number of shadow models to train.")
def get_data():
    """Load CIFAR10, one-hot encode labels and scale pixels to [0, 1].

    Returns ((X_train, y_train), (X_test, y_test)) as float32 arrays.
    """
    (X_train, y_train), (X_test, y_test) = tf.keras.datasets.cifar10.load_data()
    y_train = tf.keras.utils.to_categorical(y_train).astype("float32")
    y_test = tf.keras.utils.to_categorical(y_test).astype("float32")
    X_train = X_train.astype("float32") / 255
    X_test = X_test.astype("float32") / 255
    return (X_train, y_train), (X_test, y_test)
def target_model_fn():
    """The architecture of the target (victim) model.

    The attack is white-box, hence the attacker is assumed to know this
    architecture too. A small VGG-style CNN: two conv blocks (32 then 64
    filters) with max-pooling and dropout, then a 512-unit dense head.
    """
    model = tf.keras.models.Sequential()
    model.add(
        layers.Conv2D(
            32,
            (3, 3),
            activation="relu",
            padding="same",
            input_shape=(WIDTH, HEIGHT, CHANNELS),
        )
    )
    model.add(layers.Conv2D(32, (3, 3), activation="relu"))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(layers.Dropout(0.25))
    model.add(layers.Conv2D(64, (3, 3), activation="relu", padding="same"))
    model.add(layers.Conv2D(64, (3, 3), activation="relu"))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(layers.Dropout(0.25))
    model.add(layers.Flatten())
    model.add(layers.Dense(512, activation="relu"))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(NUM_CLASSES, activation="softmax"))
    model.compile("adam", loss="categorical_crossentropy", metrics=["accuracy"])
    return model
def attack_model_fn():
    """Attack model that takes target model predictions and predicts membership.

    Following the original paper, this attack model is specific to the class
    of the input. AttackModelBundle creates one instance of this model per
    class. Input: the NUM_CLASSES-dim prediction vector; output: a single
    membership probability.
    """
    model = tf.keras.models.Sequential()
    model.add(layers.Dense(128, activation="relu", input_shape=(NUM_CLASSES,)))
    model.add(layers.Dropout(0.3, noise_shape=None, seed=None))
    model.add(layers.Dense(64, activation="relu"))
    model.add(layers.Dropout(0.2, noise_shape=None, seed=None))
    model.add(layers.Dense(64, activation="relu"))
    model.add(layers.Dense(1, activation="sigmoid"))
    model.compile("adam", loss="binary_crossentropy", metrics=["accuracy"])
    return model
def demo(argv):
    """End-to-end membership inference: train target, shadows, attack; print accuracy."""
    del argv  # Unused.

    (X_train, y_train), (X_test, y_test) = get_data()

    # Train the target model.
    print("Training the target model...")
    target_model = target_model_fn()
    target_model.fit(
        X_train, y_train,batch_size=BATCH_SIZE, epochs=FLAGS.target_epochs, validation_split=0.1, verbose=True
    )

    # Train the shadow models.
    smb = ShadowModelBundle(
        target_model_fn,
        shadow_dataset_size=SHADOW_DATASET_SIZE,
        num_models=FLAGS.num_shadows,
    )

    # We assume that attacker's data were not seen in target's training.
    attacker_X_train, attacker_X_test, attacker_y_train, attacker_y_test = train_test_split(
        X_test, y_test, test_size=0.1
    )
    print(attacker_X_train.shape, attacker_X_test.shape)

    print("Training the shadow models...")
    X_shadow, y_shadow = smb.fit_transform(
        attacker_X_train,
        attacker_y_train,
        fit_kwargs=dict(
            batch_size=BATCH_SIZE,
            epochs=FLAGS.target_epochs,
            verbose=True,
            validation_data=(attacker_X_test, attacker_y_test),
        ),
    )

    # ShadowModelBundle returns data in the format suitable for the AttackModelBundle.
    amb = AttackModelBundle(attack_model_fn, num_classes=NUM_CLASSES)

    # Fit the attack models.
    print("Training the attack models...")
    amb.fit(
        X_shadow, y_shadow, fit_kwargs=dict(epochs=FLAGS.attack_epochs, verbose=True)
    )

    # Test the success of the attack.
    # Prepare examples that were in the training, and out of the training.
    # NOTE(review): data_out is drawn from X_test, which also fed the shadow
    # split above — confirm the overlap is acceptable for the evaluation.
    data_in = X_train[:ATTACK_TEST_DATASET_SIZE], y_train[:ATTACK_TEST_DATASET_SIZE]
    data_out = X_test[:ATTACK_TEST_DATASET_SIZE], y_test[:ATTACK_TEST_DATASET_SIZE]

    # Compile them into the expected format for the AttackModelBundle.
    attack_test_data, real_membership_labels = prepare_attack_data(
        target_model, data_in, data_out
    )

    # Compute the attack accuracy.
    attack_guesses = amb.predict(attack_test_data)
    attack_accuracy = np.mean(attack_guesses == real_membership_labels)

    print(attack_accuracy)
if __name__ == "__main__":
    app.run(demo)  # absl entry point: parses flags, then calls demo(argv)
|
leakyH/mia | examples/code/target model/CNN_capsule.py | """
Example membership inference attack against a deep net classifier on the CIFAR10 dataset
"""
import numpy as np
from absl import app
from absl import flags
import tensorflow as tf
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split
from mia.estimators import ShadowModelBundle, AttackModelBundle, prepare_attack_data
from tensorflow.keras import activations
from tensorflow.keras import backend as K
from tensorflow.keras import utils
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
NUM_CLASSES = 10
WIDTH = 32
HEIGHT = 32
CHANNELS = 3
SHADOW_DATASET_SIZE = 4000
ATTACK_TEST_DATASET_SIZE = 4000
FLAGS = flags.FLAGS
flags.DEFINE_integer(
    "target_epochs", 50, "Number of epochs to train target and shadow models."
)
flags.DEFINE_integer("attack_epochs", 50, "Number of epochs to train attack models.")
# Fixed copy-pasted help text: this flag is the shadow-model count, not epochs.
flags.DEFINE_integer("num_shadows", 10, "Number of shadow models to train.")
def squash(x, axis=-1):
    """The Squashing Function.

    The nonlinear activation used in the Capsule Network. Scales each
    capsule vector by sqrt(|x|^2) / (0.5 + |x|^2).

    NOTE(review): this differs from the s^2/(1+s^2) form in the original
    capsule paper; it matches the referenced bojone implementation — confirm
    the variant is intended.

    # Arguments
        x: Input Tensor.
        axis: Integer axis along which the squashing function is applied.
    # Returns
        Tensor with scaled value of the input tensor.
    """
    s_squared_norm = K.sum(K.square(x), axis, keepdims=True) + K.epsilon()
    scale = K.sqrt(s_squared_norm) / (0.5 + s_squared_norm)
    return scale * x
def margin_loss(y_true, y_pred):
    """Margin loss for capsule length outputs.

    Penalises the true class when its capsule length falls below
    1 - margin, and (down-weighted by lamb) other classes when their
    length exceeds margin.

    # Arguments
        y_true: tensor of true targets (one-hot).
        y_pred: tensor of predicted capsule lengths.
    # Returns
        Tensor with one scalar loss entry per sample.
    """
    lamb, margin = 0.5, 0.1
    return K.sum(y_true * K.square(K.relu(1 - margin - y_pred)) + lamb * (
        1 - y_true) * K.square(K.relu(y_pred - margin)), axis=-1)
class Capsule(layers.Layer):
    """Capsule Network layer (implementation from https://github.com/bojone/Capsule/).

    Two variants: with shared weights the layer behaves like a dense layer
    over fixed-shape input; without, like a time-distributed dense layer for
    inputs of varied length.

    Input shape:  (batch_size, input_num_capsule, input_dim_capsule)
    Output shape: (batch_size, num_capsule, dim_capsule)

    # Arguments
        num_capsule: An integer, the number of capsules.
        dim_capsule: An integer, the dimensions of the capsule.
        routings: An integer, the number of routing iterations.
        share_weights: A boolean, sets weight sharing between layers.
        activation: A string, the activation function to be applied
            ('squash' selects the local squash function).
    """

    def __init__(self,
                 num_capsule,
                 dim_capsule,
                 routings=3,
                 share_weights=True,
                 activation='squash',
                 **kwargs):
        super(Capsule, self).__init__(**kwargs)
        self.num_capsule = num_capsule
        self.dim_capsule = dim_capsule
        self.routings = routings
        self.share_weights = share_weights
        if activation == 'squash':
            self.activation = squash
        else:
            self.activation = activations.get(activation)

    def build(self, input_shape):
        # Transformation matrix mapping input capsules to prediction vectors;
        # shared across input capsules when share_weights is True.
        input_dim_capsule = input_shape[-1]
        if self.share_weights:
            self.kernel = self.add_weight(
                name='capsule_kernel',
                shape=(1, input_dim_capsule,
                       self.num_capsule * self.dim_capsule),
                initializer='glorot_uniform',
                trainable=True)
        else:
            input_num_capsule = input_shape[-2]
            self.kernel = self.add_weight(
                name='capsule_kernel',
                shape=(input_num_capsule, input_dim_capsule,
                       self.num_capsule * self.dim_capsule),
                initializer='glorot_uniform',
                trainable=True)

    def call(self, inputs, **kwargs):
        """Following the routing algorithm from Hinton's paper,
        but replace b = b + <u,v> with b = <u,v>.

        This change can improve the feature representation of the capsule.
        However, you can replace
            b = K.batch_dot(outputs, hat_inputs, [2, 3])
        with
            b += K.batch_dot(outputs, hat_inputs, [2, 3])
        to get standard routing.
        """
        if self.share_weights:
            hat_inputs = K.conv1d(inputs, self.kernel)
        else:
            hat_inputs = K.local_conv1d(inputs, self.kernel, [1], [1])

        batch_size = K.shape(inputs)[0]
        input_num_capsule = K.shape(inputs)[1]
        hat_inputs = K.reshape(hat_inputs,
                               (batch_size, input_num_capsule,
                                self.num_capsule, self.dim_capsule))
        hat_inputs = K.permute_dimensions(hat_inputs, (0, 2, 1, 3))

        b = K.zeros_like(hat_inputs[:, :, :, 0])  # routing logits
        print(self.routings)  # NOTE(review): debug print left in — fires on every forward pass
        for i in range(self.routings):
            c = K.softmax(b, 1)  # coupling coefficients over output capsules
            o = self.activation(K.batch_dot(c, hat_inputs, [2, 2]))
            if i < self.routings - 1:
                b = K.batch_dot(o, hat_inputs, [2, 3])
                if K.backend() == 'theano':
                    o = K.sum(o, axis=1)

        return o

    def compute_output_shape(self, input_shape):
        return None, self.num_capsule, self.dim_capsule
def get_data():
    """Load CIFAR10, scale pixels to [0, 1] and one-hot encode the labels.

    Returns ((X_train, y_train), (X_test, y_test)).
    """
    (X_train, y_train), (X_test, y_test) = tf.keras.datasets.cifar10.load_data()
    X_train = X_train.astype("float32") / 255
    X_test = X_test.astype("float32") / 255
    y_train = tf.keras.utils.to_categorical(y_train, 10)
    y_test = tf.keras.utils.to_categorical(y_test, 10)
    return (X_train, y_train), (X_test, y_test)
def target_model_fn():
    """The architecture of the target (victim) model.

    The attack is white-box, hence the attacker is assumed to know this
    architecture too. A conv stack feeding a Capsule layer; the output is
    the length of each of the 10 capsules.
    """
    # A simple Conv2D model
    input_image = layers.Input(shape=(None, None, 3))
    x = layers.Conv2D(64, (3, 3), activation='relu')(input_image)
    x = layers.Conv2D(64, (3, 3), activation='relu')(x)
    x = layers.AveragePooling2D((2, 2))(x)
    x = layers.Conv2D(128, (3, 3), activation='relu')(x)
    x = layers.Conv2D(128, (3, 3), activation='relu')(x)

    # Now, we reshape it to (batch_size, input_num_capsule, input_dim_capsule)
    # then connect a capsule layer.
    # The output of final model is the lengths of 10 capsules, which have 16 dimensions.
    # The length of the output vector of the capsule expresses the probability of
    # existence of the entity, so the problem becomes a 10 two-classification problem.
    x = layers.Reshape((-1, 128))(x)
    capsule = Capsule(10, 16, 3, True)(x)
    output = layers.Lambda(lambda z: K.sqrt(K.sum(K.square(z), 2)))(capsule)
    model = Model(inputs=input_image, outputs=output)

    # Margin loss is used
    model.compile(loss=margin_loss, optimizer='adam', metrics=['accuracy'])
    model.summary()
    return model
def attack_model_fn():
    """Attack model that takes target model predictions and predicts membership.

    Following the original paper, this attack model is specific to the class
    of the input. AttackModelBundle creates one instance of this model per
    class. Input: the NUM_CLASSES-dim prediction vector; output: a single
    membership probability.
    """
    model = tf.keras.models.Sequential()
    model.add(layers.Dense(128, activation="relu", input_shape=(NUM_CLASSES,)))
    model.add(layers.Dropout(0.3, noise_shape=None, seed=None))
    model.add(layers.Dense(64, activation="relu"))
    model.add(layers.Dropout(0.2, noise_shape=None, seed=None))
    model.add(layers.Dense(64, activation="relu"))
    model.add(layers.Dense(1, activation="sigmoid"))
    model.compile("adam", loss="binary_crossentropy", metrics=["accuracy"])
    return model
def demo(argv):
    """End-to-end membership inference against the capsule model; print accuracy."""
    del argv  # Unused.

    (X_train, y_train), (X_test, y_test) = get_data()

    # Train the target model.
    print("Training the target model...")
    target_model = target_model_fn()
    # validation_split=0.5 holds back half of X_train from target training.
    target_model.fit(
        X_train, y_train, batch_size=128, epochs=FLAGS.target_epochs, validation_split=0.5, shuffle=True
    )
    #target_model.fit(
    #    X_train, y_train, batch_size=128, epochs=FLAGS.target_epochs, validation_data=(X_test, y_test), shuffle=True
    #)

    # Train the shadow models.
    smb = ShadowModelBundle(
        target_model_fn,
        shadow_dataset_size=SHADOW_DATASET_SIZE,
        num_models=FLAGS.num_shadows,
    )

    # We assume that attacker's data were not seen in target's training.
    attacker_X_train, attacker_X_test, attacker_y_train, attacker_y_test = train_test_split(
        X_test, y_test, test_size=0.1
    )
    print(attacker_X_train.shape, attacker_X_test.shape)

    print("Training the shadow models...")
    X_shadow, y_shadow = smb.fit_transform(
        attacker_X_train,
        attacker_y_train,
        fit_kwargs=dict(
            epochs=FLAGS.target_epochs,
            verbose=True,
            validation_data=(attacker_X_test, attacker_y_test),
        ),
    )

    # ShadowModelBundle returns data in the format suitable for the AttackModelBundle.
    amb = AttackModelBundle(attack_model_fn, num_classes=NUM_CLASSES)

    # Fit the attack models.
    print("Training the attack models...")
    amb.fit(
        X_shadow, y_shadow, fit_kwargs=dict(epochs=FLAGS.attack_epochs, verbose=True)
    )

    # Test the success of the attack.
    # Prepare examples that were in the training, and out of the training.
    data_in = X_train[:ATTACK_TEST_DATASET_SIZE], y_train[:ATTACK_TEST_DATASET_SIZE]
    data_out = X_test[:ATTACK_TEST_DATASET_SIZE], y_test[:ATTACK_TEST_DATASET_SIZE]

    # Compile them into the expected format for the AttackModelBundle.
    attack_test_data, real_membership_labels = prepare_attack_data(
        target_model, data_in, data_out
    )

    # Compute the attack accuracy.
    attack_guesses = amb.predict(attack_test_data)
    attack_accuracy = np.mean(attack_guesses == real_membership_labels)

    print(attack_accuracy)
if __name__ == "__main__":
    app.run(demo)  # absl entry point: parses flags, then calls demo(argv)
|
leocll/CLLRepo | CLLRepo/Git/git_upgrade.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'leocll'
import os, sys, json
from functools import reduce
def log_git(info):
    """Debug logging hook; intentionally a no-op in production."""
    return None
# for p in sys.argv:
# print('外带参数:%s' % p)
def basepath():
    """Root directory of the local project checkout."""
    root = '/Users/leocll/SVN项目/hftapp'
    return root
def filelist4path(path, fn=lambda x:x.find('.', 0, 1)==-1):
    """Return the files (not directories) directly under `path` passing `fn`.

    The default filter keeps names that do not start with a dot.
    """
    log_git('路径:%s' % path)
    entries = os.listdir(path)
    files = [name for name in entries
             if os.path.isfile(os.path.join(path, name)) and fn(name)]
    log_git('文件:%s' % files)
    return files
def dirlist4path(path, fn=lambda x:x.find('.', 0, 1)==-1):
    """Return the sub-directories of `path` passing `fn`, sorted case-insensitively.

    The default filter keeps names that do not start with a dot.
    """
    log_git('路径:%s' % path)
    entries = os.listdir(path)
    dirs = [name for name in entries
            if os.path.isdir(os.path.join(path, name)) and fn(name)]
    log_git('文件夹:%s' % dirs)
    return sorted(dirs, key=str.lower)
def max4verdirlist(dirlist):
    """Return the highest dotted version string in `dirlist` (None if empty).

    Components are compared numerically left to right; a missing or
    non-integer component counts as -1. When no remaining candidate has a
    numeric component at the current position, the first remaining
    candidate is returned (input order is preserved throughout).
    """
    candidates = [name.split('.') for name in dirlist]
    if not candidates:
        return None

    def as_int(part):
        try:
            return int(part)
        except BaseException:
            return -1

    def part_at(parts, idx):
        if not isinstance(parts, list):
            raise ValueError
        return as_int(parts[idx]) if idx < len(parts) else -1

    idx = 0
    while len(candidates) > 1:
        best = max(part_at(parts, idx) for parts in candidates)
        if best == -1:
            # No candidate is numeric at this position: bail with the first.
            return '.'.join(candidates[0])
        candidates = [parts for parts in candidates
                      if part_at(parts, idx) != -1 and as_int(parts[idx]) == best]
        idx += 1
    return '.'.join(candidates[0])
def maxverdir4path(path):
    """Return the highest version-named sub-directory of `path` (None if none)."""
    dirlist = dirlist4path(path)
    return max4verdirlist(dirlist)
def file_content_rp(path, ostr, nstr):
    """Replace `ostr` with `nstr` inside a .podspec file.

    `path` may be a directory (the first *.podspec file inside is used) or a
    path ending in '.podspec'. Returns True on success.

    Raises:
        ValueError: when no .podspec file can be resolved from `path`.
        IOError: when reading or writing the file fails.
    """
    if os.path.isdir(path):
        filelist = filelist4path(path, fn=lambda x:x.find('.podspec')!=-1)
        if len(filelist) == 0:
            raise ValueError('%s has no .podspec file' % path)
        else:
            path = os.path.join(path, filelist[0])
    if path.rfind('.podspec', -8) == -1:
        raise ValueError('%s is not a .podspec file' % path)
    log_git('updating %s' % path)
    try:
        with open(path, 'r') as rf:
            content = rf.read()
        content = content.replace(ostr, nstr)
        with open(path, 'w') as wf:
            wf.write(content)
    # NOTE(review): BaseException also swallows KeyboardInterrupt and discards
    # the original cause — consider narrowing to OSError and chaining.
    except BaseException:
        raise IOError('%s replace failed' % path)
    return True
def file_content_ap(path, nstr):
    """Append `nstr` (on a new line) to a .podspec file.

    `path` may be a directory (the first *.podspec file inside is used) or a
    path ending in '.podspec'. Returns True on success.

    Raises:
        ValueError: when no .podspec file can be resolved from `path`.
        IOError: when the append fails; the original error is chained as
            __cause__ (the previous version caught BaseException and
            discarded it, hiding the real failure and even swallowing
            KeyboardInterrupt).
    """
    if os.path.isdir(path):
        filelist = filelist4path(path, fn=lambda x:x.find('.podspec')!=-1)
        if len(filelist) == 0:
            raise ValueError('%s has no .podspec file' % path)
        path = os.path.join(path, filelist[0])
    if path.rfind('.podspec', -8) == -1:
        raise ValueError('%s is not a .podspec file' % path)
    log_git('updating %s' % path)
    try:
        with open(path, 'a') as wf:
            wf.write('\n%s' % nstr)
    except OSError as e:
        # Chain the underlying error instead of discarding it.
        raise IOError('%s append failed' % path) from e
    return True
# Name -> callable dispatch table for the command-line interface below.
__fn_dic__ = {'basepath':basepath,
              'filelist4path':filelist4path,
              'dirlist4path':dirlist4path,
              'maxverdir4path':maxverdir4path,
              'max4verdirlist':max4verdirlist,
              'file_content_rp':file_content_rp,
              'file_content_ap':file_content_ap
              }
__fn__ = None     # callable resolved from the fn=... CLI argument
__args__ = []     # positional CLI arguments
__kwargs__ = {}   # key=value CLI arguments
def __handle_arges__():
    """Parse sys.argv into __fn__, __args__ and __kwargs__.

    Arguments without '=' are positional; 'fn=name' selects the callable
    from __fn_dic__ (None if unknown); other 'key=value' pairs become
    keyword arguments. (Name typo "arges" kept: referenced by the __main__
    block.)
    """
    global __fn__
    for i in range(1, len(sys.argv)):
        arg = sys.argv[i]
        index = arg.find('=')
        if index == -1:
            __args__.append(arg)
        else:
            if arg.find('fn', 0, index) != -1:
                __fn__ = arg[index + 1:].strip()
                __fn__ = __fn_dic__.get(__fn__, None)
            else:
                __kwargs__[arg[0:index].strip()] = arg[index + 1:].strip()
if __name__ == '__main__':
    # Dispatch the CLI-selected function; print its (JSON-encoded) result or
    # an 'error:...' string so the calling process can parse stdout.
    __handle_arges__()
    try:
        res = __fn__(*__args__, **__kwargs__) if callable(__fn__) else __fn__
        if isinstance(res, list) or isinstance(res, dict):
            res = json.dumps(res)
    except BaseException as e:
        res = 'error:%s' % str(e)
    finally:
        print(res)
|
yuasabe/iot_api | api/api.py | import flask
from flask import (
Blueprint, flash, g, redirect, render_template, request, session, url_for, jsonify
)
from api.db import get_db
bp = Blueprint('api', __name__, url_prefix='/api')
# app = flask.Flask(__name__)
# app.config["DEBUG"] = True
# db.init_app(app)
@bp.route('/', methods=['GET'])
def home():
    """Landing page for the API blueprint."""
    return "<h1>IOT API</h1><p>This site is a prototype API for IOT sensing, namely the DHT11 for now.</p>"
@bp.route('/v1/resources/dht11', methods=['POST'])
def api_add():
    """Store one DHT11 reading posted as JSON: {"temp": ..., "humidity": ...}.

    Returns {"status": "success"} with 200 on insert, or
    {"status": "failure", "error": ...} with 400 when a field is missing.
    """
    req_data = request.get_json()
    # .get() so a missing key yields None instead of a KeyError (HTTP 500).
    temp = req_data.get('temp') if req_data else None
    humidity = req_data.get('humidity') if req_data else None
    print(temp, humidity)
    error = None
    # `is None` (not truthiness) so a legitimate 0 / 0.0 reading is accepted.
    if temp is None:
        error = "Temp is required"
    elif humidity is None:
        error = "humidity is required"
    if error is None:
        db = get_db()
        db.execute(
            'INSERT INTO sensor_data (temp, humidity) values (?, ?)', (temp, humidity)
        )
        db.commit()
        return {"status":"success"}, 200
    # Report validation failures with a client-error status, not 200.
    return {"status":"failure", "error": error}, 400
@bp.route('/v1/resources/dht11', methods=['GET'])
def api_list():
    """Render every stored DHT11 reading via the api/index.html template."""
    db = get_db()
    data = db.execute(
        'SELECT * from sensor_data'
    ).fetchall()
    return render_template('api/index.html', data=data)
@bp.errorhandler(404)
def page_not_found(e):
    """Custom 404 page.

    NOTE(review): registered on the blueprint — Flask blueprint 404 handlers
    only fire for errors raised inside this blueprint, not for unknown URLs
    app-wide; confirm that is the intent.
    """
    return "<h1>404</h1><p>The resource could not be found.</p>", 404
# app.run()
|
Schnitzel/lagoon-kickstart | make-secrets.py | <reponame>Schnitzel/lagoon-kickstart
#!/usr/bin/env
import random, string, json
def pw(N):
    """Return a random alphanumeric string of length N.

    Uses random.SystemRandom (OS entropy), suitable for secret material.
    """
    alphabet = string.ascii_lowercase + string.ascii_uppercase + string.digits
    rng = random.SystemRandom()
    return ''.join(rng.choice(alphabet) for _ in range(N))
# Build a Kubernetes Secret manifest with freshly generated credentials and
# write it to the 'secrets' file as JSON.
secret = {
    # NOTE(review): Kubernetes manifests normally use "apiVersion", not
    # "Version" — confirm whatever consumes this file accepts this key.
    "Version": "v1",
    "stringData": {
        "JENKINS_URL": "https://to.be.determined/",
        "JWTSECRET": pw(20),
        "RABBITMQ_PASSWORD": pw(10),
        "RABBITMQ_USERNAME": pw(10),
        "SERVICE_API_ADMIN_TOKEN": pw(80)
    },
    "kind": "Secret",
    "metadata": { "name": "secret-environment" },
    "type": "Opaque"
}

# Bug fix: `file(...)` is a Python 2 builtin that no longer exists in
# Python 3 (the script crashed with NameError); open() works on both.
with open('secrets', 'w') as f:
    json.dump(secret, f, indent=2)
|
BeatrizFS/MongoDB-Python | READ.py | from Arquivo1 import Produto
#READ
#Consultar o Banco de dados
#1.Retorna todas as informações do Banco de dados
produtos = Produto.objects()
print(produtos)
for produto in produtos:
print(produto.Nome, produto.Valor) |
BeatrizFS/MongoDB-Python | DELETE.py | #DELETE
from mongoengine.errors import DoesNotExist
from Arquivo1 import Produto
try:
produto = Produto.objects(Nome="Tilápia").get()
produto.delete()
print("Produto deletado")
except DoesNotExist:
print("Produto não encontrado")
|
BeatrizFS/MongoDB-Python | UPDATE.py | #Atualiza algum atributo de determinado produto.
from mongoengine.errors import DoesNotExist
from Arquivo1 import Produto
try:
produto = Produto.objects(Nome="Arroz")
produto.update(Valor=6.0)
print("Produto atualizado!")
except DoesNotExist:
print("Produto não encontrado")
|
BeatrizFS/MongoDB-Python | Arquivo1.py | from mongoengine.connection import connect
from mongoengine.document import Document
from mongoengine.fields import BooleanField, EmailField, FloatField,IntField, ReferenceField, StringField
from mongoengine import *
# Instituto Federal de Pernambuco - Campus Paulista
# Disciplina: Banco de Dados 2
# Professor: <NAME>
# Alunos: <NAME> e <NAME>
# ---------> Projeto de Fim de Disciplina - CRUD MongoDB e Python<-----------
#______________________________________________________________________________
connect("SuperMercadoBDPY")
class Produto(Document):
    """A supermarket product document (SuperMercadoBDPY database)."""
    Nome = StringField(required=True, max_length=50)  # product name
    Marca = StringField(required=True)                # brand
    Valor = FloatField(required=True)                 # unit price
    Quantidade = IntField(required=True)              # stock quantity
    admin = BooleanField(default=False)
    registered = BooleanField(default=False)
# Seed two sample products.
# NOTE(review): these inserts run at import time — every script that does
# `from Arquivo1 import Produto` (READ/UPDATE/DELETE) re-saves them; consider
# guarding with `if __name__ == '__main__':`.
produto = Produto(
    Nome="Feijão",
    Marca="Kicaldo",
    Valor=10.50,
    Quantidade=10,
)
produto.admin = True
produto.registered = True
produto.save()

produto = Produto(
    Nome="Arroz",
    Marca="Camil",
    Valor=5.0,
    Quantidade=10,
)
produto.admin = True
produto.registered = True
produto.save()
print("Produtos salvos")
class Cliente(Document):
    """A customer document with unique address, e-mail and CPF."""
    Nome = StringField(required=True)
    Endereço = StringField(required=True, unique=True)
    Email = EmailField(unique=True)
    CPF = StringField(unique=True)  # Brazilian taxpayer id
class Mercado(Document):
    """A market/store document referencing a Produto."""
    Estabelecimento = StringField(required=True)  # store name
    Endereço = StringField(required = True)       # store address
    Itens=ReferenceField(Produto)                 # referenced product document

# Seed one sample market referencing the last saved product (runs at import).
Mercado(
    Estabelecimento = "Atacadão",
    Endereço = "Av.PE-15, 321 - Olinda/PE",
    Itens= produto,
).save()
print("Mecado salvo")
print("Concluído")
condensedWeasel/covid-electricity-demand | setup.py | <reponame>condensedWeasel/covid-electricity-demand
#! /usr/bin/env python
#
# Copyright 2020 <NAME> <<EMAIL>>
def get_version():
    """Extract __version__ from <DISTNAME>/__init__.py.

    Relies on the module-level `re`, `os` and DISTNAME globals, which are
    defined below this function but before it is called.

    Raises:
        RuntimeError: when no __version__ assignment is found (previously
            this crashed with an opaque AttributeError on `match.group`).
    """
    # Raw string: the bare \s escapes in the old literal trigger
    # invalid-escape-sequence warnings on modern Python.
    version_re = re.compile(r"""__version__[\s]*=[\s]*['|"](.*)['|"]""")
    init_file = os.path.join(os.path.dirname(__file__), DISTNAME, "__init__.py")
    with open(init_file) as fh:
        content = fh.read()
    match = version_re.search(content)
    if match is None:
        raise RuntimeError("No __version__ found in %s" % init_file)
    return match.group(1)
# Define package metadata constants and minimum dependency versions.
DISTNAME="covid-electricity-demand"
MAINTAINER="<NAME>"
MAINTAINER_EMAIL="<EMAIL>"
DESCRIPTION="Analysis of electricity useage data"
LICENSE=""
URL="https://github.com/condensedWeasel/covid-electricity-demand"
NUMPY_MIN_VERSION="1.18"
PANDAS_MIN_VERSION="1.0"
MATPLOTLIB_MIN_VERSION="3.1"
PLOTLY_MIN_VERSION="4.9"
SEABORN_MIN_VERSION="0.10"
# NOTE(review): empty version renders the requirement as "fbprophet>=" in
# setup_package below — confirm setuptools accepts that specifier.
FP_PROPHET_VERSION=""

# Imports placed mid-file, after the constants get_version() closes over but
# before any code below uses them.
import os, sys, re

# Get description from README.md
readme = os.path.join( os.path.dirname(__file__),"README.md" )
LONG_DESCRIPTION = open( readme ).read()

# Version number is contained in package
VERSION = get_version()
def setup_package():
    """ Sets up package metadata """
    from setuptools import setup, find_packages

    metadata = dict(name=DISTNAME,
                    maintainer=MAINTAINER,
                    maintainer_email=MAINTAINER_EMAIL,
                    description=DESCRIPTION,
                    long_description=LONG_DESCRIPTION,
                    license=LICENSE,
                    url=URL,
                    version=VERSION,
                    classifiers=['Intended Audience :: Science/Research',
                                 'Intended Audience :: Developers',
                                 'Programming Language :: Python',
                                 'Topic :: Software Development',
                                 'Topic :: Scientific/Engineering',
                                 'Operating System :: Microsoft :: Windows',
                                 'Programming Language :: Python :: 3.7',
                                 'Programming Language :: Python :: 3.8'
                                 ],
                    python_requires=">=3.7",
                    install_requires=[
                        'numpy>={}'.format(NUMPY_MIN_VERSION),
                        'pandas>={}'.format(PANDAS_MIN_VERSION),
                        'matplotlib>={}'.format(MATPLOTLIB_MIN_VERSION),
                        'plotly>={}'.format(PLOTLY_MIN_VERSION),
                        'seaborn>={}'.format(SEABORN_MIN_VERSION),
                        'fbprophet>={}'.format(FP_PROPHET_VERSION)
                    ],
                    )
    # Auto-discover packages rather than listing them by hand.
    metadata['packages'] = find_packages()
    setup(**metadata)

if __name__ == "__main__":
    setup_package()
TristanSalles/pybeach | setup.py | """
Creating PIPY package instruction:
python3 -m pip install --user --upgrade setuptools wheel
python3 setup.py sdist
python3 -m pip install --user --upgrade twine
twine check dist/*
twine upload dist/*
"""
from setuptools import setup, find_packages
# NOTE(review): this second import shadows the setuptools `setup` imported on
# the line above with the numpy.distutils version (and `Extension` is never
# used below); numpy.distutils is deprecated in recent numpy — confirm intent.
from numpy.distutils.core import setup, Extension
from os import path
import io
# Read the package long description from the README next to this setup.py.
this_directory = path.abspath(path.dirname(__file__))
with io.open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
if __name__ == "__main__":
    setup(name = 'pybeach',
          author = "<NAME>",
          author_email = "https://tomasbeuzen.github.io/",
          url = "https://github.com/TomasBeuzen/pybeach",
          version = "0.1.1",
          description = "Coastal Processes, Environments & Systems.",
          long_description = long_description,
          long_description_content_type='text/markdown',
          packages = ['pybeach','pybeach.support','pybeach.classifiers'],
          install_requires = [
              'numpy>=1.16.3',
              'scikit-learn>=0.20.3',
              'pandas>=0.25',
              'pytz==2019.1',
              'scipy>=1.2.1',
              'joblib==0.13.2',
          ],
          python_requires = '>=3.7',
          # package_data = {'pybeach': ['Notebooks/notebooks/*ipynb',
          #                             'Notebooks/notebooks/*py'] },
          include_package_data = True,
          classifiers = ['Programming Language :: Python :: 3.7']
          )
|
TristanSalles/pybeach | tests/test_pybeach.py | # -*- coding: utf-8 -*-
"""
This is the unittest for pybeach.
"""
import numpy as np
from sklearn.ensemble.forest import RandomForestClassifier
from pytest import approx, raises, fixture
from pybeach.beach import Profile
from pybeach.support import classifier_support as cs
@fixture()
def models():
    """Build synthetic dune profiles plus reference feature indices.

    Returns (1-D Profile, 2-D Profile, toe, crest, shoreline) where the
    z profile is a simple piecewise-linear dune shape over x = 0..80 at
    0.5 spacing; toe/crest/shoreline are the expected index positions used
    by the tests (with a loose abs=10 tolerance).
    """
    x = np.arange(0, 80, 0.5)
    z = np.hstack((np.linspace(4, 5, 40),
                   np.linspace(5, 2, 10),
                   np.linspace(2, 0, 91)[1:],
                   np.zeros((20,))))
    toe = np.array([51])
    crest = np.array([38])
    shoreline = np.array([140])
    pybeach1d = Profile(x, z)
    # Stack the same profile twice to exercise the 2-D (multi-profile) path.
    pybeach2d = Profile(x, np.vstack((z, z)))
    return pybeach1d, pybeach2d, toe, crest, shoreline
@fixture()
def data():
    """Return raw arrays (x, 1-D z, 2-D z, 2-D toe indices) for constructor tests."""
    x = np.arange(0, 80, 0.5)
    z1d = np.hstack((np.linspace(4, 5, 40),
                     np.linspace(5, 2, 10),
                     np.linspace(2, 0, 91)[1:],
                     np.zeros((20,))))
    z2d = np.vstack((z1d, z1d))
    toe2d = np.array([51, 51])
    return x, z1d, z2d, toe2d
class Testpydune(object):
    """Happy-path tests: each dune-feature predictor should land within
    10 index positions of the reference values from the `models` fixture."""

    def test_predict_dunetoe_ml(self, models):
        # Machine-learning toe predictor with each bundled classifier and
        # the accepted dune_crest argument forms.
        pydune1d, pydune2d, toe, crest, shoreline = models
        assert pydune1d.predict_dunetoe_ml('barrier_island_clf')[0] == approx(toe, abs=10)
        assert pydune1d.predict_dunetoe_ml('wave_embayed_clf')[0] == approx(toe, abs=10)
        assert pydune1d.predict_dunetoe_ml('mixed_clf')[0] == approx(toe, abs=10)
        assert pydune1d.predict_dunetoe_ml('mixed_clf', dune_crest=40)[0] == approx(toe, abs=10)
        assert pydune1d.predict_dunetoe_ml('mixed_clf', dune_crest=None)[0] == approx(toe, abs=10)
        assert pydune1d.predict_dunetoe_ml('mixed_clf', dune_crest=np.array([40]))[0] == approx(toe, abs=10)
        # assert pydune2d.predict_dunetoe_ml('SR04_clf')[0] == approx(np.hstack((toe, toe)), abs=10)

    def test_predict_dunetoe_mc(self, models):
        # Maximum-curvature toe predictor across dune_crest/shoreline options.
        pydune1d, pydune2d, toe, crest, shoreline = models
        assert pydune1d.predict_dunetoe_mc(dune_crest='max') == approx(toe, abs=10)
        assert pydune1d.predict_dunetoe_mc(dune_crest='max', hanning_window=3) == approx(toe, abs=10)
        assert pydune1d.predict_dunetoe_mc(dune_crest=40)[0] == approx(toe, abs=10)
        assert pydune1d.predict_dunetoe_mc(dune_crest=None)[0] == approx(toe, abs=10)
        assert pydune1d.predict_dunetoe_mc(dune_crest=np.array([40]))[0] == approx(toe, abs=10)
        assert pydune1d.predict_dunetoe_mc(shoreline=False) == approx(toe, abs=10)
        assert pydune1d.predict_dunetoe_mc(shoreline=159) == approx(toe, abs=10)
        assert pydune1d.predict_dunetoe_mc(shoreline=np.array([159])) == approx(toe, abs=10)
        assert pydune2d.predict_dunetoe_mc(dune_crest='rr') == approx(np.hstack((toe, toe)), abs=10)
        assert pydune1d.predict_dunetoe_mc(dune_crest='max') == approx(toe, abs=10)
        assert pydune2d.predict_dunetoe_mc(dune_crest='rr') == approx(np.hstack((toe, toe)), abs=10)

    def test_predict_dunetoe_rr(self, models):
        # Relative-relief toe predictor, thresholds and shoreline variants.
        pydune1d, pydune2d, toe, crest, shoreline = models
        assert pydune1d.predict_dunetoe_rr() == approx(toe, abs=10)
        assert pydune1d.predict_dunetoe_rr(toe_threshold=0.1) == approx(toe, abs=10)
        assert pydune1d.predict_dunetoe_rr(toe_threshold=0.01) == approx(toe, abs=10)
        assert pydune1d.predict_dunetoe_rr(dune_crest='max') == approx(toe, abs=10)
        assert pydune1d.predict_dunetoe_rr(dune_crest=40)[0] == approx(toe, abs=10)
        assert pydune1d.predict_dunetoe_rr(dune_crest=None)[0] == approx(toe, abs=10)
        assert pydune1d.predict_dunetoe_rr(dune_crest=np.array([40]))[0] == approx(toe, abs=10)
        assert pydune1d.predict_dunetoe_rr(shoreline=True) == approx(toe, abs=10)
        assert pydune1d.predict_dunetoe_rr(shoreline=False) == approx(toe, abs=10)
        assert pydune1d.predict_dunetoe_rr(shoreline=159) == approx(toe, abs=10)
        assert pydune1d.predict_dunetoe_rr(shoreline=np.array([159])) == approx(toe, abs=10)
        assert pydune2d.predict_dunetoe_rr() == approx(np.hstack((toe, toe)), abs=10)

    def test_predict_dunetoe_pd(self, models):
        # Perpendicular-distance toe predictor.
        pydune1d, pydune2d, toe, crest, shoreline = models
        assert pydune1d.predict_dunetoe_pd(dune_crest='max') == approx(toe, abs=10)
        assert pydune1d.predict_dunetoe_pd(dune_crest=40)[0] == approx(toe, abs=10)
        assert pydune1d.predict_dunetoe_pd(dune_crest=None)[0] == approx(toe, abs=10)
        assert pydune1d.predict_dunetoe_pd(dune_crest=np.array([40]))[0] == approx(toe, abs=10)
        assert pydune1d.predict_dunetoe_pd(shoreline=False) == approx(toe, abs=10)
        assert pydune1d.predict_dunetoe_pd(shoreline=159) == approx(toe, abs=10)
        assert pydune1d.predict_dunetoe_pd(shoreline=np.array([159])) == approx(toe, abs=10)
        assert pydune2d.predict_dunetoe_pd(dune_crest='rr') == approx(np.hstack((toe, toe)), abs=10)

    def test_predict_dunecrest(self, models):
        # Dune-crest detection with both 'max' and 'rr' methods.
        pydune1d, pydune2d, toe, crest, shoreline = models
        assert pydune1d.predict_dunecrest(method='max') == approx(crest, abs=10)
        assert pydune1d.predict_dunecrest(method='rr', threshold=0.99, window_size=[80]) == approx(78, abs=10)
        assert pydune2d.predict_dunecrest(method='rr') == approx(np.hstack((crest, crest)), abs=10)

    def test_predict_shoreline(self, models):
        # Shoreline detection across the accepted dune_crest argument forms.
        pydune1d, pydune2d, toe, crest, shoreline = models
        assert pydune1d.predict_shoreline(dune_crest='max') == approx(shoreline, abs=10)
        assert pydune1d.predict_shoreline(dune_crest=40)[0] == approx(shoreline, abs=10)
        assert pydune1d.predict_shoreline(dune_crest=None)[0] == approx(shoreline, abs=10)
        assert pydune1d.predict_shoreline(dune_crest=np.array([40]))[0] == approx(shoreline, abs=10)
        assert pydune2d.predict_shoreline(dune_crest='rr') == approx(np.hstack((shoreline, shoreline)), abs=10)
class TestpyduneFails(object):
    """Failure-path tests: invalid constructor inputs and bad method
    arguments must raise the documented exception types."""

    def test_bad_input(self, models, data):
        # Constructor validation: type, dimensionality, length, orientation.
        x, z1d, z2d, toe2d = data
        assert isinstance(Profile(x, z2d.T), Profile) # test transposed data
        with raises(TypeError): # no input
            Profile()
        with raises(TypeError): # only one input
            Profile(x)
        with raises(AssertionError): # list input
            Profile(list(x), z1d)
        with raises(AssertionError): # list input
            Profile(x, list(z1d))
        with raises(AssertionError): # string input
            Profile('x', z1d)
        with raises(AssertionError): # multidimensional x
            Profile(z2d, x)
        with raises(ValueError): # x and z don't share dimension
            Profile(np.arange(10), z1d)
        with raises(Warning): # profiles with wrong orientation (sea on left)
            Profile(x, np.flipud(z1d))

    def test_bad_method_calls(self, models):
        # Each predictor should reject unknown keyword arguments (Warning),
        # bad method/shoreline values (ValueError), out-of-range numeric
        # options (AssertionError) and a missing classifier file.
        pydune1d, _, _, _, _ = models
        with raises(Warning):
            pydune1d.predict_dunetoe_ml('wave_embayed_clf', bad_key_word=123)
        with raises(Warning):
            pydune1d.predict_dunetoe_mc(bad_key_word=123)
        with raises(Warning):
            pydune1d.predict_dunetoe_mc(dune_crest=None, bad_key_word=123)
        with raises(Warning):
            pydune1d.predict_dunetoe_pd(bad_key_word=123)
        with raises(Warning):
            pydune1d.predict_dunetoe_pd(dune_crest=None, bad_key_word=123)
        with raises(Warning):
            pydune1d.predict_dunetoe_rr(dune_crest='rr', bad_key_word=123)
        with raises(Warning):
            pydune1d.predict_dunetoe_rr(shoreline=True, bad_key_word=123)
        with raises(Warning):
            pydune1d.predict_shoreline(bad_key_word=123)
        with raises(ValueError):
            pydune1d.predict_dunetoe_ml('wave_embayed_clf', dune_crest='bad_method')
        with raises(ValueError):
            pydune1d.predict_dunecrest(method='m')
        with raises(ValueError):
            pydune1d.predict_dunecrest(method=1)
        with raises(ValueError):
            pydune1d.predict_dunetoe_ml('wave_embayed_clf', dune_crest='bad_method')
        with raises(ValueError):
            pydune1d.predict_dunecrest(method="rr", window_size='string')
        with raises(ValueError):
            pydune1d.predict_dunetoe_mc(shoreline='string')
        with raises(ValueError):
            pydune1d.predict_dunetoe_mc(dune_crest='bad_method')
        with raises(ValueError):
            pydune1d.predict_dunetoe_rr(shoreline='string')
        with raises(ValueError):
            pydune1d.predict_dunetoe_rr(dune_crest='bad_method')
        with raises(ValueError):
            pydune1d.predict_dunetoe_pd(shoreline='string')
        with raises(ValueError):
            pydune1d.predict_dunetoe_pd(dune_crest='bad_method')
        with raises(ValueError):
            pydune1d.predict_dunetoe_rr(toe_window_size='string')
        with raises(ValueError):
            pydune1d.predict_shoreline(dune_crest='bad')
        with raises(AssertionError):
            pydune1d.predict_dunetoe_mc(dune_crest='rr', window_size=-1)
        with raises(AssertionError):
            pydune1d.predict_dunetoe_mc(dune_crest='max', hanning_window=-1)
        with raises(AssertionError):
            pydune1d.predict_dunetoe_rr(dune_crest='rr', window_size=-1)
        with raises(AssertionError):
            pydune1d.predict_dunetoe_rr(toe_window_size=-1)
        with raises(AssertionError):
            pydune1d.predict_dunetoe_rr(toe_window_size=[21, 1000])
        with raises(AssertionError):
            pydune1d.predict_dunetoe_rr(toe_threshold=-1)
        with raises(AssertionError):
            pydune1d.predict_dunetoe_rr(water_level='1')
        with raises(AssertionError):
            pydune1d.predict_dunetoe_ml(1)
        with raises(AssertionError):
            pydune1d.predict_dunetoe_ml('SR04_clf', -1)
        with raises(AssertionError):
            pydune1d.predict_dunecrest(method="rr", threshold=1.1)
        with raises(AssertionError):
            pydune1d.predict_dunecrest(method="rr", threshold=-0.1)
        with raises(AssertionError):
            pydune1d.predict_dunecrest(method="rr", window_size=-1)
        with raises(AssertionError):
            pydune1d.predict_dunecrest(method="rr", window_size=[21, 1000])
        with raises(FileNotFoundError):
            pydune1d.predict_dunetoe_ml('bad_file_name')
class Testpyduneclassifier(object):
    """Tests for the classifier-support helpers."""

    def test_make_classifier(self, data):
        # create_classifier should return an sklearn RandomForestClassifier.
        x, _, z2d, toe2d = data
        assert isinstance(cs.create_classifier(x, z2d, toe2d), RandomForestClassifier)
|
harveywwu/OpenData | opendatatools/common/ui_util.py | # -*- coding: UTF-8 -*-
import sys, time
class ShowProcess():
    """Console progress bar.

    Call show_process() once per step (or with an explicit step index) to
    redraw a bar like ``[>>>>>-----]50.00%`` in place; close() prints the
    completion message and resets the counter.
    """
    i = 0               # current step
    max_steps = 0       # total number of steps
    max_arrow = 50      # bar width in characters
    infoDone = 'done'   # message printed on completion

    def __init__(self, max_steps, infoDone = 'Done'):
        self.max_steps = max_steps
        self.i = 0
        self.infoDone = infoDone

    def show_process(self, i=None):
        """Advance (or jump to step ``i``) and redraw the bar in place."""
        if i is not None:
            self.i = i
        else:
            self.i += 1
        num_arrow = int(self.i * self.max_arrow / self.max_steps)  # '>' count
        num_line = self.max_arrow - num_arrow                      # '-' count
        percent = self.i * 100.0 / self.max_steps
        process_bar = '[' + '>' * num_arrow + '-' * num_line + ']'\
                      + '%.2f' % percent + '%' + '\r'
        # BUG FIX: the original used the Python 2 statement "print process_bar",
        # which is a SyntaxError on Python 3 — and print's trailing newline
        # would defeat the '\r' in-place redraw anyway. Write straight to
        # stdout and flush so the bar updates immediately.
        sys.stdout.write(process_bar)
        sys.stdout.flush()
        if self.i >= self.max_steps:
            self.close()

    def close(self):
        """Finish the bar: newline, completion message, reset counter."""
        print('')
        print(self.infoDone)
        self.i = 0
if __name__=='__main__':
    # Demo: drive the bar through 100 steps, ~0.1 s apart.
    max_steps = 100
    process_bar = ShowProcess(max_steps, 'OK')
    for i in range(max_steps):
        process_bar.show_process()
        time.sleep(0.1)
harveywwu/OpenData | opendatatools/usstock/usstock_interface.py | <gh_stars>0
# encoding: utf-8
from .usstock_agent import USStockAgent
usstock_agent = USStockAgent()
def set_proxies(proxies):
    # Configure HTTP proxies on the module-wide USStockAgent singleton.
    return set_proxies(proxies)
def get_symbols():
    # List the available US stock symbols.
    return usstock_agent.get_symbols()
def get_daily(symbol, start_date = None, end_date = None):
    # Daily bars for `symbol`; None bounds mean an open-ended range.
    return usstock_agent.get_daily(symbol, start_date, end_date)
def get_dividend(symbol, start_date = None, end_date = None):
    # Dividend history for `symbol`.
    return usstock_agent.get_dividend(symbol, start_date, end_date)
def get_split(symbol, start_date = None, end_date = None):
    # Stock-split history for `symbol`.
    return usstock_agent.get_split(symbol, start_date, end_date)
harveywwu/OpenData | opendatatools/aemo/aemo_interface.py | <filename>opendatatools/aemo/aemo_interface.py
# encoding: utf-8
from .aemo_agent import AEMOAgent
import pandas as pd
import datetime
aemo_agent = AEMOAgent()
def monthlist(start_date, end_date):
    """Return every month spanned by [start_date, end_date] as 'YYYYMM' strings.

    Both bounds are inclusive at month granularity; e.g. 2020-01-15 to
    2020-03-01 yields ['202001', '202002', '202003'].
    """
    # Count months since year 0 so a flat range covers year boundaries.
    total_months = lambda dt: dt.month + 12 * dt.year
    mlist = []
    # BUG FIX: xrange() is Python 2 only (NameError on Python 3); range()
    # behaves identically here on both.
    for tot_m in range(total_months(start_date) - 1, total_months(end_date)):
        y, m = divmod(tot_m, 12)
        mlist.append(datetime.datetime(y, m + 1, 1).strftime("%Y%m"))
    return mlist
def get_curr_price_demand(region):
    # Current price/demand snapshot for an AEMO region code (e.g. 'NSW').
    return aemo_agent.get_curr_price_demand(region)
def get_hist_price_demand(region = 'NSW', start_date = None, end_date = None):
    """Fetch historical AEMO price/demand for `region` over a date range.

    Dates may be date objects or strings ('YYYYMMDD', 'YYYY-MM-DD',
    'YYYY/MM/DD'). Defaults: end = today - 30 days, start = end - 30 days.
    Returns (DataFrame filtered to the range, message string).
    """
    # NOTE(review): the type(...).__name__ != 'date' checks treat a
    # datetime.datetime argument as a string (its __name__ is 'datetime')
    # and would crash on .replace — confirm callers only pass date or str.
    if end_date is None:
        end_date = datetime.date.today() - datetime.timedelta(days=30)
    elif type(end_date).__name__ != 'date':
        end_date = end_date.replace('/', '')
        end_date = end_date.replace('-', '')
        end_date = datetime.datetime.strptime(end_date, "%Y%m%d")
    if start_date is None:
        start_date = end_date - datetime.timedelta(days=30)
    elif type(start_date).__name__ != 'date':
        start_date = start_date.replace('/', '')
        start_date = start_date.replace('-', '')
        start_date = datetime.datetime.strptime(start_date, "%Y%m%d")
    contmth_list = monthlist(start_date, end_date)
    # BUG FIX: DataFrame.append() was deprecated and removed in pandas 2.0;
    # collect the monthly frames and concatenate once instead.
    frames = []
    for contmth in contmth_list:
        df, msg = aemo_agent.get_hist_price_demand(region, contmth)
        if len(df) > 0:
            frames.append(df)
    if frames:
        df_list = pd.concat(frames, ignore_index=True)
        # Keep only settlements inside (start 00:00, end+1day 00:00].
        start_time = datetime.datetime.combine(start_date, datetime.time(0, 0))
        end_time = datetime.datetime.combine(end_date + datetime.timedelta(days=1), datetime.time(0, 0))
        df_list = df_list[(df_list['SETTLEMENTDATE'] > start_time) & (df_list['SETTLEMENTDATE'] <= end_time)]
        df_list.reset_index(drop=True, inplace=True)
        msg = ""
    else:
        df_list = pd.DataFrame()
        msg = "No data loaded"
    return df_list, msg
|
harveywwu/OpenData | opendatatools/aemo/aemo_agent.py | # encoding: utf-8
from opendatatools.common import RestAgent
import io
import pandas as pd
import datetime
class AEMOAgent(RestAgent):
    """HTTP client for AEMO (Australian Energy Market Operator) CSV feeds."""

    def __init__(self):
        RestAgent.__init__(self)

    def get_curr_price_demand(self, region):
        """Current price/demand CSV for a region code; returns (DataFrame, msg)."""
        url = "http://www.nemweb.com.au/mms.GRAPHS/GRAPHS/GRAPH_30{region}1.csv".format(region = region)
        res = self.do_request(url)
        if res:
            df = pd.read_csv(io.StringIO(res.decode('utf-8')))
            # Parse the settlement timestamps into datetime objects.
            df['SETTLEMENTDATE'] = df['SETTLEMENTDATE'].apply(lambda x: datetime.datetime.strptime(x, "%Y/%m/%d %H:%M:%S"))
            msg = ""
        else:
            df = pd.DataFrame()
            msg = "Data Link Error"
        return df, msg

    def get_hist_price_demand(self, region, cont_mth):
        """Month file of historical price/demand; returns (DataFrame, msg).

        cont_mth accepts 'YYYYMM' (also with '-' or '/' separators).
        NOTE(review): the date-object branch looks unreachable — `'-' in
        cont_mth` raises TypeError first when cont_mth is a date; confirm
        callers only pass strings (monthlist() does).
        """
        if '-' in cont_mth:
            cont_mth = cont_mth.replace('-', '')
        elif '/' in cont_mth:
            cont_mth = cont_mth.replace('/', '')
        elif type(cont_mth).__name__ == 'date':
            cont_mth = str(cont_mth.year * 100 + cont_mth.month)
        # Trim anything beyond YYYYMM (e.g. a full YYYYMMDD string).
        if len(cont_mth)>6:
            cont_mth = cont_mth[:6]
        url = "http://www.nemweb.com.au/mms.GRAPHS/data/DATA{contmth}_{region}1.csv".format(contmth = cont_mth, \
                                                                                           region = region)
        res = self.do_request(url)
        if res:
            df = pd.read_csv(io.StringIO(res.decode('utf-8')))
            df['SETTLEMENTDATE'] = df['SETTLEMENTDATE'].apply(lambda x: datetime.datetime.strptime(x, "%Y/%m/%d %H:%M:%S"))
            msg = ""
        else:
            df = pd.DataFrame()
            msg = "Data is not available"
        return df, msg
harveywwu/OpenData | opendatatools/spot/spot_agent.py | # encoding: utf-8
import requests
from PIL import Image
import pytesseract
import io
import pandas as pd
from bs4 import BeautifulSoup
from datetime import datetime
from opendatatools.common import RestAgent
dict_commodity_spot_indicator = {
'65': '钢材指数',
'61': '铁矿指数',
'64': '焦炭指数',
'1002': '煤炭指数',
'1003': '水泥指数',
'1100': 'FTZ指数',
'118': '钢铁行业PMI指数',
'119': '钢铁行业PMI生产指数',
'120': '钢铁行业PMI新订单指数',
'121': '钢铁行业PMI新出口订单指数',
'122': '钢铁行业PMI产成品库存指数',
'123': '钢铁行业PMI原材料库存指数',
'74': '沪市终端线螺每周采购量监控',
'72': '沪螺纹钢社会库存',
'67': '国内螺纹钢社会库存量',
'68': '国内线材社会库存量',
'69': '国内主要城市热轧卷板库存',
'70': '国内主要城市冷轧卷板库存',
'73': '国内主要城市中厚板库存',
'117': '全国主要钢材品种库存总量',
'108': '热轧价格走势',
'109': '冷轧价格走势',
'110': '中板价格走势',
'111': '型材价格走势',
'127': '沪二级螺纹钢价格走势',
'99': '重点企业粗钢日均产量(旬报)',
'124': '重点企业钢材库存量(旬报)',
'159': '国内月度粗钢日均产量',
'35': '国内月度粗钢产量',
'88': '国内月度钢材产量',
'40': '国内月度螺纹钢产量',
'41': '国内月度线材产量',
'114': '国内月度热轧板卷产量',
'115': '国内月度冷轧板卷产量',
'116': '国内月度中厚板产量',
'177': '国内月度生铁产量',
'37': '国内月度焦炭产量',
'36': '国内月度铁矿石原矿产量',
'42': '国内月度铁矿石进口量',
'38': '国内月度钢材出口量',
'39': '国内月度钢材进口量',
'43': '国内铁矿石港口存量',
'161': '唐山地区钢坯库存量',
'100': '印度矿港口库存',
'77': '波罗的海干散货指数(BDI)',
'78': '废钢价格走势',
'79': '钢坯价格走势',
'178': '钢材成本指数',
'93': '铁矿石进口月度均价',
'94': '巴西图巴朗-北仑铁矿海运价',
'95': '西澳-北仑铁矿海运价',
'1006': '澳大利亚粉矿价格(56.5%,日照港)',
'106': '澳大利亚粉矿价格(61.5%青岛港,元/吨)',
'107': '巴西粉矿价格( 65% 日照港,元/吨)',
'125': '62%铁矿石指数',
'126': '63.5%印度粉矿外盘报价',
'162': '全球粗钢月度产量(万吨)',
'163': '全球粗钢日均产量(万吨)',
'164': '全球粗钢产能利用率(%)',
}
class SpotAgent(RestAgent):
    """Scraper for commodity spot indicators from www.96369.net."""

    def __init__(self):
        RestAgent.__init__(self)

    @staticmethod
    def clear_text(text):
        # Normalize a table cell: drop embedded newlines, trim whitespace.
        return text.replace('\n', '').strip()

    def get_captcha(self):
        """Fetch and OCR the site's arithmetic captcha.

        Returns (answer, cookies), or (None, None) when OCR/eval fails.
        """
        url = 'http://www.96369.net/Other/ValidateCode.aspx'
        response = self.do_request(url, method='GET', type='binary')
        img = Image.open(io.BytesIO(response))
        img = img.convert('L')
        # Binarize: pixels below the threshold become 0 (black), others 1.
        threshold = 160
        table = []
        for i in range(256):
            if i < threshold:
                table.append(0)
            else:
                table.append(1)
        # The captcha contains only digits; map commonly mis-OCR'd letters
        # back to the digits they resemble.
        rep = {'O': '0',
               'I': '1', 'L': '1',
               'Z': '2',
               'S': '8'
               }
        img = img.point(table, '1')
        img = img.convert('RGBA')
        # NOTE(review): '-psm 6' is the Tesseract 3 flag; Tesseract 4+
        # expects '--psm 6' — confirm against the installed binary.
        rsp = pytesseract.image_to_string(img, config='-psm 6')
        # BUG FIX: `rep` was defined but never applied, so common OCR
        # confusions (e.g. 'I' for '1') always made eval() fail below.
        for wrong, right in rep.items():
            rsp = rsp.replace(wrong, right)
        try:
            # The captcha is an arithmetic expression; eval computes the
            # answer. NOTE: eval on OCR output is acceptable only because
            # the input comes from this fixed site, not from users.
            answer = eval(rsp)
            return answer, self.get_cookies()
        except:
            return None, None

    def get_commodity_spot_indicator(self):
        """Return a DataFrame of (indicator_id, indicator_name)."""
        df = pd.DataFrame.from_dict(dict_commodity_spot_indicator, orient='index')
        df.columns=['indicator_name']
        df = df.rename_axis('indicator_id').reset_index()
        return df

    def get_commodity_spot_indicator_data(self, indicator_id, start_date, end_date):
        """Scrape the history table for one indicator over a date range.

        Returns (DataFrame with date/value/change/chg_pct, message).
        """
        url = 'http://www.96369.net/indices/%s' % indicator_id
        captcha, cookies = self.get_captcha()
        # BUG FIX: the original ignored start_date/end_date and always
        # queried 2000-01-01 .. today; honor the caller-supplied range
        # (date/datetime objects or pre-formatted 'YYYY-MM-DD' strings).
        if end_date is None:
            end_date = datetime.now().strftime('%Y-%m-%d')
        elif not isinstance(end_date, str):
            end_date = end_date.strftime('%Y-%m-%d')
        if start_date is None:
            start_date = '2000-01-01'
        elif not isinstance(start_date, str):
            start_date = start_date.strftime('%Y-%m-%d')
        data = {
            'txtStartTime': start_date,
            'txtEndTime': end_date,
            'txtyzcode': captcha
        }
        response = self.do_request(url, param=data, cookies=cookies)
        if response is None:
            return None, '获取数据失败'
        soup = BeautifulSoup(response, "html5lib")
        divs = soup.find_all('div')
        data = []
        # The history lives in a <table class="mod_tab"> inside the
        # <div class="wll-commodity"> block; each data row has 4 cells.
        for div in divs:
            if div.has_attr('class') and 'wll-commodity' in div['class']:
                tables = div.find_all('table')
                for table in tables:
                    if table.has_attr('class') and 'mod_tab' in table['class']:
                        rows = table.findAll('tr')
                        for row in rows:
                            cols = row.findAll('td')
                            if len(cols) == 4:
                                date = SpotAgent.clear_text(cols[0].text)
                                value = SpotAgent.clear_text(cols[1].text)
                                change = SpotAgent.clear_text(cols[2].text)
                                chg_pct = SpotAgent.clear_text(cols[3].text)
                                data.append({
                                    "date": date,
                                    "value": value,
                                    "change": change,
                                    "chg_pct": chg_pct,
                                })
        return pd.DataFrame(data), ''
|
harveywwu/OpenData | opendatatools/spot/spot_interface.py | <reponame>harveywwu/OpenData<gh_stars>0
# encoding: utf-8
import datetime
from .spot_agent import SpotAgent
spot_agent = SpotAgent()
def get_commodity_spot_indicator():
    # List all supported indicators (id + name).
    return spot_agent.get_commodity_spot_indicator()
def get_commodity_spot_indicator_data(indicator_id, start_date = None, end_date = None):
    # Default window: the most recent 365 days ending today.
    if end_date == None:
        end_date = datetime.date.today()
    if start_date == None:
        start_date = end_date - datetime.timedelta(days = 365)
    return spot_agent.get_commodity_spot_indicator_data(indicator_id, start_date, end_date)
|
harveywwu/OpenData | opendatatools/futures/futures_agent.py | # encoding: utf-8
from opendatatools.common import RestAgent, split_date, date_convert, remove_chinese
import pandas as pd
import json
import copy
import zipfile
import io
import re
from xml.etree import ElementTree
# Column mapping from the SHFE ranking feed's field names to our schema
# (volume / long open interest / short open interest, each with its
# change and the ranked futures-broker name).
SHF_name_map = {"CJ1": "volume",
                "CJ1_CHG": "volume_chg",
                "PARTICIPANTABBR1": "volume_fut_broker",
                "CJ2": "long_oi",
                "CJ2_CHG": "long_oi_chg",
                "PARTICIPANTABBR2": "long_fut_broker",
                "CJ3": "short_oi",
                "CJ3_CHG": "short_oi_chg",
                "PARTICIPANTABBR3": "short_fut_broker",
                "RANK": "rank",
                "INSTRUMENTID": "symbol",
                }
def format_field(x):
    """Strip newlines and surrounding whitespace from str cells; pass every other type through unchanged."""
    return x.replace('\n', '').strip() if type(x) is str else x
def _merge_df(df_list):
df_result = None
for df in df_list:
if df_result is None:
df_result = df
else:
df_result = pd.merge(df_result, df, left_index=True, right_index=True)
return df_result
def _concat_df(df_list):
    # Stack the frames vertically; sort=True keeps the column union ordered.
    return pd.concat(df_list, sort = True)
def _rename_df(df):
name_map = {
"会员简称" : "期货公司",
"(手)" : "",
}
for col in df.columns:
for name, value in name_map.items():
if name in col:
col_new = col.replace(name, value)
df.rename(columns={col: col_new}, inplace=True)
class SHFAgent(RestAgent):
    """Shanghai Futures Exchange: daily member trade/position ranking."""

    def __init__(self):
        RestAgent.__init__(self)

    # date : %Y%m%d
    def get_trade_rank(self, date):
        """Fetch the ranking JSON for `date` ('%Y-%m-%d'); returns (DataFrame, msg)."""
        url = 'http://www.shfe.com.cn/data/dailydata/kx/pm%s.dat' % date_convert(date, '%Y-%m-%d', '%Y%m%d')
        response = self.do_request(url, None)
        rsp = json.loads(response)
        code = rsp['o_code']
        msg = rsp['o_msg']
        if code != 0:
            return None, msg
        # Prefer the report date embedded in the response when present.
        if 'report_date' in rsp.keys():
            date = rsp['report_date']
        else:
            date = date_convert(date, '%Y-%m-%d', "%Y%m%d")
        records = rsp['o_cursor']
        df = pd.DataFrame(records)
        df['date'] = date
        # Clean every cell (strip newlines/whitespace from strings).
        for col in df.columns:
            df[col] = df[col].apply(lambda x : format_field(x))
        df['RANK'] = df['RANK'].apply(lambda x: int(x))
        # Keep ranks 1..20 only — presumably drops aggregate/summary rows
        # the feed includes; confirm against the raw feed.
        df = df[(df['RANK']>0) & (df['RANK']<=20)]
        # Rename and restrict to the normalized schema columns.
        df.rename(columns=SHF_name_map, inplace=True)
        df = df[list(SHF_name_map.values())]
        return df, ""
class DCEAgent(RestAgent):
    """Dalian Commodity Exchange: daily member trade/position ranking,
    downloaded as a zip of per-contract text files and parsed into one
    DataFrame."""

    def __init__(self):
        RestAgent.__init__(self)

    '''
    名次 会员简称 成交量 增减
    1 海通期货 24,326 9,991
    2 中信期货 12,926 5,960
    3 兴证期货 12,835 4,405
    4 西南期货 11,054 6,614
    '''
    def _parse_trade_file(self, file, date):
        # One file per contract; the symbol is the second "_"-separated
        # token of the file name.
        filename = file.name
        name_items = filename.split("_")
        symbol = name_items[1]
        lines = file.readlines()
        df_list = []
        # Files after 2015 are UTF-8 encoded; older ones are GBK.
        if date > '2015-12-31':
            charset = 'utf-8'
        else:
            charset = 'gbk'
        # NOTE(review): zipfile yields bytes lines, while the header test
        # below compares against str literals — this matching only works
        # under Python 2's str model; confirm the target runtime.
        for i in range(len(lines)):
            items = lines[i].split()
            if len(items) == 4 and items[0] == '名次':
                head = items
                # Map the Chinese section header to our column prefix.
                if items[2] == '成交量':
                    ticker = 'volume'
                    ticker2 = 'volume'
                elif items[2] == '持买单量':
                    ticker = 'long'
                    ticker2 = 'long_oi'
                elif items[2] == '持卖单量':
                    ticker = 'short'
                    ticker2 = 'short_oi'
                else:
                    # BUG FIX: "print x" is Python 2 statement syntax and a
                    # SyntaxError on Python 3; use the print() function.
                    print("unknown rank = %s" % items[2].decode('utf-8'))
                col_names = ['rank', ticker + '_fut_broker', ticker2, ticker2 + '_chg']
                head[1] = head[2] + head[1]
                head[3] = head[2] + head[3]
                # Collect up to 20 ranked rows, stopping at the totals line.
                data = []
                for j in range(20):
                    i = i + 1
                    items = lines[i].decode(charset).split()
                    if (len(items) < 1) or (items[0] == '总计'):
                        break
                    data.append(items)
                if data == []:
                    # Keep an empty placeholder row so the section still
                    # produces a 4-column frame.
                    data.append(['', '', '', ''])
                df = pd.DataFrame(data)
                if len(df.columns) == 4:
                    df.columns = col_names
                    df.set_index('rank', inplace=True)
                    df_list.append(df)
        df_result = _merge_df(df_list)
        df_result['symbol'] = symbol
        return df_result

    def get_trade_rank(self, date):
        """Download the batch zip for `date` ('%Y-%m-%d'); returns (DataFrame, msg)."""
        url = 'http://www.dce.com.cn/publicweb/quotesdata/exportMemberDealPosiQuotesBatchData.html'
        year, month, day = split_date(date, '%Y-%m-%d')
        data = {
            "year": year,
            "month": month - 1,  # the site API expects a zero-based month
            "day": day,
            "batchExportFlag": 'batch',
        }
        response = self.do_request(url, data, "POST", type='binary')
        zip_ref = zipfile.ZipFile(io.BytesIO(response))
        df_list = []
        for finfo in zip_ref.infolist():
            file = zip_ref.open(finfo, 'r')
            df = self._parse_trade_file(file, date)
            df_list.append(df)
        df_result = _concat_df(df_list)
        df_result['date'] = date
        df_result.reset_index(level=['rank'], inplace=True)
        _rename_df(df_result)
        return df_result, ""
class CZCAgent(RestAgent):
    """Zhengzhou Commodity Exchange: daily member trade/position ranking.

    NOTE(review): the field helpers call str.encode('utf-8') on split
    pieces and then use str methods on the result — this follows Python 2's
    bytes/str model and would raise on Python 3; confirm the target runtime.
    """

    def __init__(self):
        RestAgent.__init__(self)

    '''
    品种:苹果AP 日期: 2018-05-30
    名次 |会员简称 |成交量(手)|增减量 |会员简称 |持买仓量 |增减量 |会员简称 |持卖仓量 |增减量
    1 |海通期货 |157,955 |-2,663 |海通期货 |42,771 |3,342 |海通期货 |43,889 |1,843
    2 |招商期货 |67,527 |-12,527 |华泰期货 |21,091 |-1,093 |华泰期货 |20,927 |-1,991
    3 |徽商期货 |66,887 |-29,680 |招商期货 |17,302 |-3,063 |招商期货 |20,519 |-2,788
    4 |光大期货 |66,322 |-10,134 |永安期货 |17,193 |784 |中信期货 |15,779 |716
    '''
    def _get_url_by_date(self, date):
        # The exchange moved its static files around 2015-10-01; pick the
        # URL template matching the requested date.
        year, month, day = split_date(date, '%Y-%m-%d')
        date_int = int(date_convert(date, '%Y-%m-%d', "%Y%m%d"))
        url = 'http://old.czce.com.cn/portal/DFSStaticFiles/Future/%d/%d/FutureDataHolding.txt'
        url_old = 'http://old.czce.com.cn/portal/exchange/%d/datatradeholding/%d.txt'
        if date < '2015-10-01':
            return url_old % (year, date_int)
        else:
            return url % (year, date_int)

    def _get_code(self, text):
        # Extract the contract code from a "品种:XXX 日期:..." header line.
        # NOTE(review): the trailing '|' creates an empty alternation
        # branch; modern re.split treats empty matches differently than
        # Python 2 did — verify on the target interpreter.
        items = re.split(":| |\t|\r\n|", text)
        return items[1]

    def _get_head(self, text):
        # New-format header: disambiguate the repeated '会员简称'/'增减量'
        # columns by prefixing them with their section name.
        items = self._split_field(text)
        items[1] = '成交量' + items[1]
        items[3] = '成交量增减'
        items[4] = '持买仓量' + items[4]
        items[6] = '持买仓量增减'
        items[7] = '持卖仓量' + items[7]
        items[9] = '持卖仓量增减'
        return items

    def _get_head_old(self, text):
        # Old-format files carry no usable header; use a fixed one.
        items = ['名次', '成交量期货公司', '成交量', '成交量增减', '持买仓量期货公司', '持买仓量', '持买仓量增减', '持卖仓量期货公司', '持卖仓量', '持卖仓量增减']
        return items

    def get_head(self, text, old):
        # Dispatch between the pre/post 2015-10 header layouts.
        if old == True:
            return self._get_head_old(text)
        else:
            return self._get_head(text)

    def _get_data(self, lines, old):
        # Old files are comma separated, new ones pipe separated.
        if old == True:
            sep = ','
        else:
            sep = '|'
        data = []
        for line in lines:
            items = self._split_field(line, splitter=sep)
            data.append(items)
        return data

    def _split_field(self, text, splitter = "|"):
        # Split one row and normalize each cell (strip, drop CRLF).
        items = text.split(splitter)
        result = [x.encode('utf-8').strip().replace('\r\n', '') for x in items]
        return result

    def _parse_trade_file_old(self, file):
        # Pre-2015-10 layout: comma-separated sections starting at a
        # '合约' header and ending at the '合计' totals row.
        lines = file.readlines()
        df_list = []
        for i in range(len(lines)):
            items = self._split_field(lines[i], splitter = ',')
            if items[0][0:2] == '合约':
                code = self._get_code(lines[i])
                heads = self.get_head(lines[i], old=True)
                a = i
                while True:
                    a += 1
                    items = self._split_field(lines[a], splitter=',')
                    # print(items)
                    if items[0] == '合计':
                        data = self._get_data(lines[i + 1:a + 1], old=True)
                        break
                df = pd.DataFrame(data)
                df.columns = heads
                df['symbol'] = remove_chinese(code)
                df_list.append(df)
        df_result = _concat_df(df_list)
        return df_result

    def _parse_trade_file(self, file):
        # Post-2015-10 layout: pipe-separated sections; the header row has
        # exactly 10 cells starting with '名次', the contract code sits on
        # the preceding line.
        lines = file.readlines()
        df_list = []
        for i in range(len(lines)):
            items = self._split_field(lines[i], splitter='|')
            if len(items) == 10 and items[0] == '名次':
                code = self._get_code(lines[i-1])
                heads = self.get_head(lines[i], old=False)
                a = i
                while True:
                    a += 1
                    items = self._split_field(lines[a], splitter='|')
                    # print(items)
                    if items[0] == '合计':
                        data = self._get_data(lines[i + 1:a + 1], old=False)
                        break
                df = pd.DataFrame(data)
                df.columns = heads
                df['symbol'] = remove_chinese(code)
                df_list.append(df)
        df_result = _concat_df(df_list)
        return df_result

    def parse_trade_file(self, file, date):
        # Route to the parser matching the file-format era.
        if date < '2015-10-01':
            return self._parse_trade_file_old(file)
        else:
            return self._parse_trade_file(file)

    def get_trade_rank(self, date):
        """Download and parse the ranking file for `date` ('%Y-%m-%d'); returns (DataFrame, msg)."""
        url = self._get_url_by_date(date)
        response = self.do_request(url, None, "GET", type='binary')
        df = self.parse_trade_file(io.StringIO(response.decode('gbk')), date)
        df['date'] = date
        _rename_df(df)
        return df, ""
class CFEAgent(RestAgent):
    """China Financial Futures Exchange: daily member position ranking
    (XML feed of the post-2018 site layout)."""

    def __init__(self):
        RestAgent.__init__(self)

    def get_trade_rank(self, date):
        """Rankings for all financial futures products on `date` ('%Y-%m-%d')."""
        # The per-product XML files only exist in the new site layout.
        if date < '2018-01-01':
            print("CFE网站改版,暂不兼容旧版,目前此接口仅支持2018年以后数据获取")
            return None, '网站改版,暂不兼容'
        products = ['T', 'IF', 'IC', 'IH', 'TF']
        df_list = []
        for product in products:
            df = self._get_trade_rank_by_product(date, product)
            df_list.append(df)
        df = _concat_df(df_list)
        df['date'] = date
        # Expose the (symbol, 名次) MultiIndex as ordinary columns.
        df.reset_index(level=[0, 1], inplace=True)
        return df, ""

    def _get_trade_rank_by_product(self, date, product):
        # Fetch and flatten one product's XML ranking file.
        url = 'http://www.cffex.com.cn/sj/ccpm/%04d%02d/%02d/%s.xml'
        year, month, day = split_date(date, '%Y-%m-%d')
        url = url % (year, month, day, product)
        response = self.do_request(url, None, "GET")
        root = ElementTree.fromstring(response.encode('utf-8'))
        data_list = []
        for dataElements in root:
            if dataElements.tag != 'data':
                continue
            data = {}
            for subElement in dataElements:
                key = subElement.tag
                value = subElement.text
                if key in ['instrumentid', 'datatypeid', 'rank', 'shortname', 'volume', 'varvolume']:
                    data[key] = value
            data_list.append(data)
        df = pd.DataFrame(data_list)
        # datatypeid 0/1/2 marks which section a row belongs to.
        datatype_map = {
            "0" : "成交量",
            "1" : "持买单量",
            "2" : "持卖单量",
        }
        # Split by section, rename columns per section, then merge the three
        # sections back together on (symbol, 名次).
        df_list = []
        for type, name in datatype_map.items():
            df_tmp = df[df['datatypeid'] == type].copy()
            df_tmp['rank'] = df_tmp['rank'].apply(lambda x: int(x))
            df_tmp.rename(columns={"instrumentid" : "symbol"}, inplace=True)
            df_tmp.rename(columns={"rank" : "名次"}, inplace=True)
            df_tmp.rename(columns={"shortname" : name + "期货公司"}, inplace=True)
            df_tmp.rename(columns={"volume": name}, inplace=True)
            df_tmp.rename(columns={"varvolume": name + "增减"}, inplace=True)
            df_tmp.drop(['datatypeid'], axis=1, inplace=True)
            df_tmp.set_index(['symbol','名次'], inplace=True)
            df_list.append(df_tmp)
        return _merge_df(df_list)
class SinaFuturesAgent(RestAgent):
    """Sina finance: futures quotes and K-line data (domestic + CFFEX)."""

    def __init__(self):
        RestAgent.__init__(self)
        # Map the one-character market tag from the quote string to an
        # exchange code.
        self.dict_market_map = {
            '沪' : 'SHF',
            '连' : 'DCE',
            '郑' : 'CZC',
            '油' : 'INE',
        }

    def convert_market(self, market, product=''):
        # Crude oil always trades on INE regardless of its market tag.
        if product == '原油':
            return 'INE'
        else:
            return self.dict_market_map[market]

    def _parse_quote_rsp(self, rsp):
        # The response is one "var hq_str_<code>=..." line per contract.
        lines = rsp.split('\n')
        list_quotes = []
        for line in lines:
            quote = self._parse_quote_str(line)
            if quote is not None:
                list_quotes.append(quote)
        return pd.DataFrame(list_quotes)

    def _is_cfe_code(self, code):
        # A CFFEX code is a product prefix (IF/IC/IH/T/TF) plus digits.
        code2 = code
        for i in range(10):
            code2 = code2.replace(str(i), '')
        if code2 in ['IF', 'IC', 'IH', 'T', 'TF']:
            return True
        else:
            return False

    def _parse_quote_str(self, line):
        """Parse one quote line into a dict, or None when unparseable.

        NOTE(review): CFFEX codes skip the 18-field length check but the
        field layout below is read for them too — confirm the CFFEX quote
        string really shares this layout.
        """
        line = line.replace('var hq_str_', '')
        line = line.replace('CFF_RE_', '')
        items = line.split('=')
        if len(items) < 2:
            return None
        code = items[0]
        quote_str = items[1].replace('"', '').replace(';', '')
        fields = quote_str.split(',')
        if self._is_cfe_code(code):
            pass
        else:
            if len(fields) < 18 :
                return None
        quote = {
            'code' : code,
            'instname' : fields[0],
            'time' : fields[1],
            'open' : fields[2],
            'high' : fields[3],
            'low' : fields[4],
            'preclose' : fields[5],
            'bidprice1': fields[6],
            'askprice1': fields[7],
            'last' : fields[8],
            'settle' : fields[9],
            'presettle' : fields[10],
            'askvol1' : fields[11],
            'bidvol1' : fields[12],
            'oi' : fields[13],
            'volume' : fields[14],
            'exchange' : self.convert_market(fields[15]),
            'product' : fields[16],
            'date' : fields[17],
        }
        return quote

    def get_quote(self, codes):
        """Snapshot quotes for a comma-separated code list; returns (DataFrame, msg)."""
        url = 'http://hq.sinajs.cn/list=%s' % codes
        response = self.do_request(url)
        if response is None:
            return None, '获取数据失败'
        df = self._parse_quote_rsp(response)
        return df, ''

    # 1m, 5m, 15m, 30m, 60m, 1d
    def get_kline(self, type, code):
        """K-line bars for `code`; `type` is a bar size string ('5m'..'1d').

        CFFEX codes use a different endpoint and support fewer bar sizes.
        """
        if self._is_cfe_code(code):
            if type not in ['5m', '15m', '60m', '1d']:
                return None, '不支持的K线类型'
            if type == '1d':
                url = 'http://stock2.finance.sina.com.cn/futures/api/json.php/CffexFuturesService.getCffexFuturesDailyKLine?symbol=%s' % (code)
            else:
                url = 'http://stock2.finance.sina.com.cn/futures/api/json.php/CffexFuturesService.getCffexFuturesMiniKLine%s?symbol=%s' % (type, code)
        else:
            if type not in ['5m', '15m', '30m', '60m', '1d']:
                return None, '不支持的K线类型'
            if type == '1d':
                url = 'http://stock2.finance.sina.com.cn/futures/api/json.php/IndexService.getInnerFuturesDailyKLine?symbol=%s' % (code)
            else:
                url = 'http://stock2.finance.sina.com.cn/futures/api/json.php/IndexService.getInnerFuturesMiniKLine%s?symbol=%s' % (type, code)
        response = self.do_request(url)
        if response is None or response == 'null':
            return None, '获取数据失败'
        jsonobj = json.loads(response)
        df = pd.DataFrame(jsonobj)
        df.columns = ['datetime', 'open', 'high', 'low', 'close', 'volume']
        return df, ''
|
harveywwu/OpenData | opendatatools/aemo/__init__.py | # encoding: UTF-8
from .aemo_interface import *
__all__ = ['get_curr_price_demand', 'get_hist_price_demand']
|
OdyX/synapse | synapse/config/tls.py | # -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import warnings
from datetime import datetime
from hashlib import sha256
from unpaddedbase64 import encode_base64
from OpenSSL import crypto
from synapse.config._base import Config
logger = logging.getLogger()
class TlsConfig(Config):
    """TLS-related configuration: certificate/key file paths, published
    fingerprints, and ACME (Let's Encrypt) auto-provisioning settings."""

    def read_config(self, config):
        """Parse the TLS section of the homeserver config dict.

        Populates the ACME settings, certificate/key paths, the published
        fingerprint list and the ``no_tls`` flag.  The certificate itself is
        not loaded here; see :meth:`read_certificate_from_disk`.
        """
        acme_config = config.get("acme", None)
        if acme_config is None:
            acme_config = {}

        self.acme_enabled = acme_config.get("enabled", False)
        self.acme_url = acme_config.get(
            "url", u"https://acme-v01.api.letsencrypt.org/directory"
        )
        self.acme_port = acme_config.get("port", 80)
        self.acme_bind_addresses = acme_config.get("bind_addresses", ['::', '0.0.0.0'])
        self.acme_reprovision_threshold = acme_config.get("reprovision_threshold", 30)

        self.tls_certificate_file = self.abspath(config.get("tls_certificate_path"))
        self.tls_private_key_file = self.abspath(config.get("tls_private_key_path"))
        # Keep the raw configured fingerprints so read_certificate_from_disk()
        # can rebuild self.tls_fingerprints from scratch on each (re)load.
        self._original_tls_fingerprints = config["tls_fingerprints"]
        self.tls_fingerprints = list(self._original_tls_fingerprints)
        self.no_tls = config.get("no_tls", False)

        # This config option applies to non-federation HTTP clients
        # (e.g. for talking to recaptcha, identity servers, and such)
        # It should never be used in production, and is intended for
        # use only when running tests.
        self.use_insecure_ssl_client_just_for_testing_do_not_use = config.get(
            "use_insecure_ssl_client_just_for_testing_do_not_use"
        )

        # Parsed X509 certificate / private key objects; populated later by
        # read_certificate_from_disk().
        self.tls_certificate = None
        self.tls_private_key = None

    def is_disk_cert_valid(self):
        """
        Is the certificate we have on disk valid, and if so, for how long?

        Returns:
            int: Days remaining of certificate validity (negative if the
                certificate has already expired).
            None: No certificate exists.
        """
        if not os.path.exists(self.tls_certificate_file):
            return None

        try:
            with open(self.tls_certificate_file, 'rb') as f:
                cert_pem = f.read()
        except Exception:
            logger.exception("Failed to read existing certificate off disk!")
            raise

        try:
            tls_certificate = crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem)
        except Exception:
            logger.exception("Failed to parse existing certificate off disk!")
            raise

        # YYYYMMDDhhmmssZ -- in UTC
        expires_on = datetime.strptime(
            tls_certificate.get_notAfter().decode('ascii'), "%Y%m%d%H%M%SZ"
        )
        now = datetime.utcnow()
        days_remaining = (expires_on - now).days
        return days_remaining

    def read_certificate_from_disk(self):
        """
        Read the certificates from disk.

        Loads (and parses) the certificate, and the private key unless
        ``no_tls`` is set, then rebuilds the published fingerprint list so it
        includes this certificate's own SHA-256 fingerprint.
        """
        self.tls_certificate = self.read_tls_certificate(self.tls_certificate_file)

        # Check if it is self-signed, and issue a warning if so.
        if self.tls_certificate.get_issuer() == self.tls_certificate.get_subject():
            warnings.warn(
                (
                    "Self-signed TLS certificates will not be accepted by Synapse 1.0. "
                    "Please either provide a valid certificate, or use Synapse's ACME "
                    "support to provision one."
                )
            )

        if not self.no_tls:
            self.tls_private_key = self.read_tls_private_key(self.tls_private_key_file)

        # Start from the configured fingerprints (not any previously-computed
        # list) so repeated calls do not accumulate stale entries.
        self.tls_fingerprints = list(self._original_tls_fingerprints)

        # Check that our own certificate is included in the list of fingerprints
        # and include it if it is not.
        x509_certificate_bytes = crypto.dump_certificate(
            crypto.FILETYPE_ASN1, self.tls_certificate
        )
        sha256_fingerprint = encode_base64(sha256(x509_certificate_bytes).digest())
        sha256_fingerprints = set(f["sha256"] for f in self.tls_fingerprints)
        if sha256_fingerprint not in sha256_fingerprints:
            self.tls_fingerprints.append({u"sha256": sha256_fingerprint})

    def default_config(self, config_dir_path, server_name, **kwargs):
        """Return the commented default TLS section for a generated config.

        Placeholders in the template are filled via ``% locals()``.
        """
        base_key_name = os.path.join(config_dir_path, server_name)

        tls_certificate_path = base_key_name + ".tls.crt"
        tls_private_key_path = base_key_name + ".tls.key"

        # this is to avoid the max line length. Sorrynotsorry
        proxypassline = (
            'ProxyPass /.well-known/acme-challenge '
            'http://localhost:8009/.well-known/acme-challenge'
        )

        return (
            """\
        # PEM-encoded X509 certificate for TLS.
        # This certificate, as of Synapse 1.0, will need to be a valid and verifiable
        # certificate, signed by a recognised Certificate Authority.
        #
        # See 'ACME support' below to enable auto-provisioning this certificate via
        # Let's Encrypt.
        #
        tls_certificate_path: "%(tls_certificate_path)s"

        # PEM-encoded private key for TLS
        tls_private_key_path: "%(tls_private_key_path)s"

        # ACME support: This will configure Synapse to request a valid TLS certificate
        # for your configured `server_name` via Let's Encrypt.
        #
        # Note that provisioning a certificate in this way requires port 80 to be
        # routed to Synapse so that it can complete the http-01 ACME challenge.
        # By default, if you enable ACME support, Synapse will attempt to listen on
        # port 80 for incoming http-01 challenges - however, this will likely fail
        # with 'Permission denied' or a similar error.
        #
        # There are a couple of potential solutions to this:
        #
        #  * If you already have an Apache, Nginx, or similar listening on port 80,
        #    you can configure Synapse to use an alternate port, and have your web
        #    server forward the requests. For example, assuming you set 'port: 8009'
        #    below, on Apache, you would write:
        #
        #    %(proxypassline)s
        #
        #  * Alternatively, you can use something like `authbind` to give Synapse
        #    permission to listen on port 80.
        #
        acme:
            # ACME support is disabled by default. Uncomment the following line
            # to enable it.
            #
            # enabled: true

            # Endpoint to use to request certificates. If you only want to test,
            # use Let's Encrypt's staging url:
            #     https://acme-staging.api.letsencrypt.org/directory
            #
            # url: https://acme-v01.api.letsencrypt.org/directory

            # Port number to listen on for the HTTP-01 challenge. Change this if
            # you are forwarding connections through Apache/Nginx/etc.
            #
            # port: 80

            # Local addresses to listen on for incoming connections.
            # Again, you may want to change this if you are forwarding connections
            # through Apache/Nginx/etc.
            #
            # bind_addresses: ['::', '0.0.0.0']

            # How many days remaining on a certificate before it is renewed.
            #
            # reprovision_threshold: 30

        # If your server runs behind a reverse-proxy which terminates TLS connections
        # (for both client and federation connections), it may be useful to disable
        # All TLS support for incoming connections. Setting no_tls to True will
        # do so (and avoid the need to give synapse a TLS private key).
        #
        # no_tls: True

        # List of allowed TLS fingerprints for this server to publish along
        # with the signing keys for this server. Other matrix servers that
        # make HTTPS requests to this server will check that the TLS
        # certificates returned by this server match one of the fingerprints.
        #
        # Synapse automatically adds the fingerprint of its own certificate
        # to the list. So if federation traffic is handled directly by synapse
        # then no modification to the list is required.
        #
        # If synapse is run behind a load balancer that handles the TLS then it
        # will be necessary to add the fingerprints of the certificates used by
        # the loadbalancers to this list if they are different to the one
        # synapse is using.
        #
        # Homeservers are permitted to cache the list of TLS fingerprints
        # returned in the key responses up to the "valid_until_ts" returned in
        # key. It may be necessary to publish the fingerprints of a new
        # certificate and wait until the "valid_until_ts" of the previous key
        # responses have passed before deploying it.
        #
        # You can calculate a fingerprint from a given TLS listener via:
        # openssl s_client -connect $host:$port < /dev/null 2> /dev/null |
        #   openssl x509 -outform DER | openssl sha256 -binary | base64 | tr -d '='
        # or by checking matrix.org/federationtester/api/report?server_name=$host
        #
        tls_fingerprints: []
        # tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]
        """
            % locals()
        )

    def read_tls_certificate(self, cert_path):
        """Read and parse a PEM X509 certificate from *cert_path*."""
        cert_pem = self.read_file(cert_path, "tls_certificate")
        return crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem)

    def read_tls_private_key(self, private_key_path):
        """Read and parse a PEM private key from *private_key_path*."""
        private_key_pem = self.read_file(private_key_path, "tls_private_key")
        return crypto.load_privatekey(crypto.FILETYPE_PEM, private_key_pem)
|
OdyX/synapse | synapse/app/frontend_proxy.py | <filename>synapse/app/frontend_proxy.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from twisted.internet import defer, reactor
from twisted.web.resource import NoResource
import synapse
from synapse import events
from synapse.api.errors import SynapseError
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.http.server import JsonResource
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v1.base import ClientV1RestServlet, client_path_patterns
from synapse.rest.client.v2_alpha._base import client_v2_patterns
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
logger = logging.getLogger("synapse.app.frontend_proxy")
class PresenceStatusStubServlet(ClientV1RestServlet):
    """Stub for the presence-status endpoint on this worker.

    GET requests are proxied through to the main synapse process; PUT
    requests are authenticated and then silently accepted, so clients
    cannot actually change presence via this worker.
    """
    PATTERNS = client_path_patterns("/presence/(?P<user_id>[^/]*)/status")

    def __init__(self, hs):
        super(PresenceStatusStubServlet, self).__init__(hs)
        self.http_client = hs.get_simple_http_client()
        self.auth = hs.get_auth()
        # Base URI of the main synapse process that requests are proxied to.
        self.main_uri = hs.config.worker_main_http_uri

    @defer.inlineCallbacks
    def on_GET(self, request, user_id):
        # Pass through the auth headers, if any, in case the access token
        # is there.
        # NOTE(review): the header name is a str here, but KeyUploadServlet
        # below passes b"Authorization" -- confirm which form twisted's
        # getRawHeaders expects on this Python version.
        auth_headers = request.requestHeaders.getRawHeaders("Authorization", [])
        headers = {
            "Authorization": auth_headers,
        }
        result = yield self.http_client.get_json(
            self.main_uri + request.uri.decode('ascii'),
            headers=headers,
        )
        defer.returnValue((200, result))

    @defer.inlineCallbacks
    def on_PUT(self, request, user_id):
        # Authenticate, then discard the body: presence updates are a no-op
        # on this worker.
        yield self.auth.get_user_by_req(request)
        defer.returnValue((200, {}))
class KeyUploadServlet(RestServlet):
    """Handles /keys/upload on the frontend proxy.

    Uploads with a non-empty body are proxied to the main synapse process;
    empty uploads (key-count queries) are answered locally from the slaved
    datastore.
    """
    PATTERNS = client_v2_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")

    def __init__(self, hs):
        """
        Args:
            hs (synapse.server.HomeServer): server
        """
        super(KeyUploadServlet, self).__init__()
        self.auth = hs.get_auth()
        self.store = hs.get_datastore()
        self.http_client = hs.get_simple_http_client()
        # Base URI of the main synapse process that uploads are proxied to.
        self.main_uri = hs.config.worker_main_http_uri

    @defer.inlineCallbacks
    def on_POST(self, request, device_id):
        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
        user_id = requester.user.to_string()
        body = parse_json_object_from_request(request)

        if device_id is not None:
            # passing the device_id here is deprecated; however, we allow it
            # for now for compatibility with older clients.
            if (requester.device_id is not None and
                    device_id != requester.device_id):
                logger.warning("Client uploading keys for a different device "
                               "(logged in as %s, uploading for %s)",
                               requester.device_id, device_id)
        else:
            device_id = requester.device_id

        if device_id is None:
            raise SynapseError(
                400,
                "To upload keys, you must pass device_id when authenticating"
            )

        if body:
            # They're actually trying to upload something, proxy to main synapse.
            # Pass through the auth headers, if any, in case the access token
            # is there.
            auth_headers = request.requestHeaders.getRawHeaders(b"Authorization", [])
            headers = {
                "Authorization": auth_headers,
            }
            result = yield self.http_client.post_json_get_json(
                self.main_uri + request.uri.decode('ascii'),
                body,
                headers=headers,
            )

            defer.returnValue((200, result))
        else:
            # Just interested in counts.
            result = yield self.store.count_e2e_one_time_keys(user_id, device_id)
            defer.returnValue((200, {"one_time_key_counts": result}))
class FrontendProxySlavedStore(
    SlavedDeviceStore,
    SlavedClientIpStore,
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    BaseSlavedStore,
):
    """Read-only datastore for the frontend proxy worker, combining the
    slaved stores this worker needs (devices, client IPs, appservices,
    registration)."""
    pass
class FrontendProxyServer(HomeServer):
    """HomeServer variant implementing the frontend-proxy worker."""

    DATASTORE_CLASS = FrontendProxySlavedStore

    def _listen_http(self, listener_config):
        """Set up one HTTP listener from its config dict.

        Supports the "metrics" and "client" resource names; the client
        resource serves the key-upload servlet (and the presence stub when
        presence is disabled).
        """
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
                elif name == "client":
                    resource = JsonResource(self, canonical_json=False)
                    KeyUploadServlet(self).register(resource)

                    # If presence is disabled, use the stub servlet that does
                    # not allow sending presence
                    if not self.config.use_presence:
                        PresenceStatusStubServlet(self).register(resource)

                    # The same resource serves all client API prefixes.
                    resources.update({
                        "/_matrix/client/r0": resource,
                        "/_matrix/client/unstable": resource,
                        "/_matrix/client/v2_alpha": resource,
                        "/_matrix/client/api/v1": resource,
                    })

        root_resource = create_resource_tree(resources, NoResource())

        _base.listen_tcp(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
                self.version_string,
            ),
            reactor=self.get_reactor()
        )

        # NOTE(review): message says "client reader" but this is the
        # frontend proxy -- looks like a copy/paste from another worker.
        logger.info("Synapse client reader now listening on port %d", port)

    def start_listening(self, listeners):
        """Start every configured listener ("http", "manhole" or "metrics"),
        then start TCP replication from the main process."""
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                _base.listen_tcp(
                    listener["bind_addresses"],
                    listener["port"],
                    manhole(
                        username="matrix",
                        password="<PASSWORD>",
                        globals={"hs": self},
                    )
                )
            elif listener["type"] == "metrics":
                if not self.get_config().enable_metrics:
                    logger.warn(("Metrics listener configured, but "
                                 "enable_metrics is not True!"))
                else:
                    _base.listen_metrics(listener["bind_addresses"],
                                         listener["port"])
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        # Factory used by HomeServer to create the replication client.
        return ReplicationClientHandler(self.get_datastore())
def start(config_options):
    """Parse the command-line/config options, then build and run the
    frontend proxy worker (blocks in the reactor until shutdown)."""
    try:
        config = HomeServerConfig.load_config(
            "Synapse frontend proxy", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + str(e) + "\n")
        sys.exit(1)

    # This process only makes sense when configured as the frontend proxy
    # worker with a main process to proxy requests to.
    assert config.worker_app == "synapse.app.frontend_proxy"
    assert config.worker_main_http_uri is not None

    setup_logging(config, use_worker_options=True)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    ss = FrontendProxyServer(
        config.server_name,
        db_config=config.database_config,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ss.setup()

    def start():
        # Deferred until the reactor is running: load TLS material and then
        # begin listening on the configured worker listeners.
        ss.config.read_certificate_from_disk()
        ss.tls_server_context_factory = context_factory.ServerContextFactory(config)
        ss.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
            config
        )
        ss.start_listening(config.worker_listeners)
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)

    _base.start_worker_reactor("synapse-frontend-proxy", config)
# Script entry point: run the frontend proxy with the CLI arguments.
if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
|
3ating3L3phants/google_translate | lang_basic.py | #!/usr/bin/python
# To authenticate to Google you will need to
# download your json keyfile from your account.
# Once done, run the following command in a terminal:
# export GOOGLE_APPLICATION_CREDENTIALS="/path/to/keyfile.json"
#
# Alternatively, set the export in your ~/.bashrc or ~/.profile
################################################################

# Imports the Google Cloud client library
from google.cloud import translate


def main():
    """Prompt for a text and a target language code, then print the translation."""
    # Instantiates a client
    translate_client = translate.Client()

    # The text to translate, provided by the user.
    # NOTE: input() is the Python 3 builtin; the previous raw_input() only
    # exists on Python 2 and raised NameError on Python 3.
    text = input(u'Text to translate? ')
    # text = u'What do you mean?'

    # The target language as a two letter language code
    # (e.g., en, ar, ru, fr, etc.)
    target = input(u'Digraph for Language? ')
    # target = 'ar'

    # Translates text into chosen language
    translation = translate_client.translate(
        text,
        target_language=target)

    print(u'Text: {}'.format(text))
    print(u'Translation: {}'.format(translation['translatedText']))


if __name__ == '__main__':
    main()
|
glintcore/pyglint | test/runtests.py | <reponame>glintcore/pyglint
import sys
import os
package_root_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, package_root_path)
import unittest
from pyglint import glint
import argparse
import uuid
#Tests
class GlintTest(unittest.TestCase):
    """Integration tests against a live Glint server.

    The server location and credentials are read from the GLINTHOST,
    GLINTUSER and GLINTPASS environment variables.  Each test uploads the
    bundled shark.csv fixture under a random name and deletes it afterwards.
    """

    def setUp(self):
        # Connection parameters come from the environment (see class docstring).
        self.host = os.environ.get('GLINTHOST')
        self.user = os.environ.get('GLINTUSER')
        self.passw = os.environ.get('GLINTPASS')
        test_dir = os.path.dirname(os.path.abspath(__file__))
        resources_dir = os.path.join(test_dir, "resources")
        shark_file = os.path.join(resources_dir, "shark.csv")
        if not os.path.exists(shark_file):
            self.fail("Could not find test CSV file at path %s" % shark_file)
        self.shark_file_path = shark_file
        self.connection = glint.GlintConnection(self.host, self.user, self.passw)
        # Random name so concurrent test runs don't collide on the server.
        self.shark_file_name = uuid.uuid1().hex  # random UUID prefix
        self.shark_glint_file = self.connection.add_file_path(self.shark_file_name,
                                                              self.shark_file_path)

    def tearDown(self):
        # Remove the uploaded fixture from the server.
        self.shark_glint_file.delete()

    def test_check_file_exists(self):
        # verify_file raises GlintError if the upload did not register.
        self.connection.verify_file(self.shark_file_name)

    def test_get_nonexistant_file(self):
        # A bogus name should produce a "Could not find file ..." error.
        message = ""
        try:
            self.connection.verify_file("foo-file-fake")
        except glint.GlintError as ge:
            message = ge.args[0]
        if not message.startswith("Could not find file"):
            raise Exception("Bad message: '%s'" % message)

    def test_retrieve_file(self):
        # Round-trip the CSV and sanity-check the header row.
        data = self.shark_glint_file.get_data()
        data_lines = data.split("\n")
        header_row = data_lines[0].split(",")
        self.assertTrue(header_row[0] == 'Shark_Number')
        self.assertTrue('Location_Code' in header_row)
        self.assertTrue('item_length' in header_row)
        self.assertTrue(header_row[-1] == 'Comments')

    def test_tag_file_metadata(self):
        # Tagged attributes should come back annotated as "{element}" suffixes
        # when the data is requested with metadata.
        self.shark_glint_file.tag('Shark_Number', 'dc:identifier')
        self.shark_glint_file.tag('Gear_Description', 'dc:description')
        data = self.shark_glint_file.get_data(with_metadata=True)
        header_row = data.split("\n")[0].split(',')
        self.assertTrue(header_row[0].endswith('{dc:identifier}'))
        self.assertTrue(header_row[2].endswith('{dc:description}'))

    def test_retrieve_file_subset(self):
        # Requesting three columns should yield a three-column header.
        columns = [ 'Location_Code', 'item_length', 'Comments' ]
        data = self.shark_glint_file.get_data(columns=columns)
        header_row = data.split('\n')[0].split(',')
        self.assertTrue(len(header_row) == 3)

    def test_retrieve_file_tsv(self):
        # TSV output should have the same header fields, tab-separated.
        data = self.shark_glint_file.get_data(data_format='tsv')
        data_lines = data.split("\n")
        header_row = data_lines[0].split("\t")
        self.assertTrue(header_row[0] == 'Shark_Number')
        self.assertTrue('Location_Code' in header_row)
        self.assertTrue('item_length' in header_row)
        self.assertTrue(header_row[-1] == 'Comments')
if __name__ == "__main__":
#parser = argparse.ArgumentParser()
#parser.add_argument("host", help="The hostname of our running Glint server")
##parser.add_argument("user", help="The Glint user to use for testing")
#parser.add_argument("passw", help="The password for our Glint user")
#args = parser.parse_args()
#os.environ['GLINTHOST'] = args.host
#os.environ['GLINTUSER'] = args.user
#os.environ['GLINTPASS'] = args.passw
unittest.main()
|
glintcore/pyglint | pyglint/glint.py | <reponame>glintcore/pyglint
import csv
import requests
from requests.auth import HTTPBasicAuth
class GlintError(Exception):
    """Raised for any error reported by, or while talking to, a Glint server."""
    pass
class GlintConnection:
    """A connection to a Glint server for one user.

    Wraps the server's HTTP API: uploading, listing, retrieving, tagging
    and deleting data files.  All requests use HTTP basic auth.
    """

    def __init__(self, host, username, password):
        # host is the base URL of the Glint server (used as "<host>/<user>/...").
        self.host = host
        self.username = username
        self.password = password

    def add_file_string(self, name, data):
        """Upload *data* (a string) as a new file called *name*.

        Returns a GlintFile on success; on failure prints the error and
        implicitly returns None.
        """
        try:
            self.put_file(name, data)
            new_file = GlintFile(name, self)
            return new_file
        except GlintError as ge:
            print("Unable to create new file: %s" % ge)

    def add_file_path(self, name, path):
        """Upload the contents of the local file at *path* under *name*.

        Returns a GlintFile on success; on failure prints the error and
        implicitly returns None.
        """
        try:
            with open(path, "r") as file_handle:
                content = file_handle.read()
            self.put_file(name, content)
            new_file = GlintFile(name, self)
            return new_file
        except GlintError as ge:
            print("Unable to create new file: %s" % ge)

    def get_file_list(self):
        """Return the list of file names stored for this user.

        The server's listing is parsed as CSV; the first column of each row
        (after the heading row) is taken as the file name.
        """
        url = "%s/%s" % (self.host, self.username)
        response = requests.get(url)
        if response.status_code != 200:
            raise GlintError("Got code %s trying to retrieve file listing: %s" %
                             (response.status_code, response.text))
        csv_string = response.content.decode('utf-8')
        csv_lines = csv_string.splitlines()
        reader = csv.reader(csv_lines, delimiter=',')
        reader_list = list(reader)
        file_list = []
        reader_list = reader_list[1:]  # skip heading
        for row in reader_list:
            file_list.append(row[0])
        return file_list

    def verify_file(self, name):
        """
        Throw a GlintError if the named file does not exist
        for this connection
        """
        file_list = self.get_file_list()
        if name not in file_list:
            raise GlintError("Could not find file %s for this user" % name)

    def get_glint_file(self, name):
        """
        Get a GlintFile for an existing file, by name

        Returns None (after printing the error) if the file does not exist.
        """
        try:
            self.verify_file(name)
            glint_file = GlintFile(name, self)
            return glint_file
        except GlintError as ge:
            print("Unable to retrieve file: %s" % ge)

    def get_auth(self):
        """Build the HTTP basic-auth object for this user's credentials."""
        auth = HTTPBasicAuth(self.username, self.password)
        return auth

    def put_file(self, name, data):
        """PUT *data* to the server as file *name*; raises GlintError on failure."""
        auth = self.get_auth()
        url = "%s/%s/%s" % (self.host, self.username, name)
        data_json = { "data" : data }
        response = requests.put(url, json=data_json, auth=auth)
        if response.status_code != 200:
            raise GlintError("Unable to create data file. Got code %s: %s"
                             % (response.status_code, response.text))
        return response

    def delete_file(self, name):
        """DELETE file *name* from the server; raises GlintError unless 204."""
        auth = self.get_auth()
        url = "%s/%s/%s" % (self.host, self.username, name)
        response = requests.delete(url, auth=auth)
        if response.status_code != 204:
            raise GlintError("Unable to delete file. Got code %s: %s" %
                             (response.status_code, response.text))
        return response

    def get_file_data(self, name, transform_query=None):
        """GET the contents of file *name*.

        *transform_query*, if truthy, is appended verbatim as the URL query
        string (the server's transform DSL, e.g. "show(a,b)as(tsv)md()").
        Raises GlintError on a non-200 response.
        """
        auth = self.get_auth()
        if transform_query:
            url = "%s/%s/%s?%s" % (self.host, self.username, name, transform_query)
        else:
            url = "%s/%s/%s" % (self.host, self.username, name)
        response = requests.get(url, auth=auth)
        if response.status_code != 200:
            raise GlintError("Unable to retrieve file %s: %s" % (name, response.text))
        return response

    def put_metadata(self, name, attribute, element):
        """Tag *attribute* of file *name* with metadata *element*
        (e.g. "dc:identifier"); raises GlintError on failure."""
        auth = self.get_auth()
        url = "%s/%s/%s.%s" % (self.host, self.username, name, attribute)
        payload = { "metadata" : element }
        response = requests.put(url, json=payload, auth=auth)
        if response.status_code != 200:
            raise GlintError("Got error response %s when tagging dataset %s: %s"
                             % (response.status_code, name, response.text))
class GlintFile:
    """A handle to one named dataset stored on a Glint server."""

    def __init__(self, name, connection):
        self.name = name
        self.connection = connection

    def get_data(self, columns=None, data_format=None, with_metadata=False):
        """Fetch the file's contents, optionally transformed server-side.

        Builds the server's transform DSL by concatenating the requested
        operations: show(col,...) to select columns, as(fmt) to change the
        output format, md() to include metadata annotations.
        """
        ops = []
        if columns:
            ops.append("show(%s)" % ",".join(columns))
        if data_format:
            ops.append("as(%s)" % data_format)
        if with_metadata:
            ops.append("md()")
        return self.connection.get_file_data(self.name, "".join(ops)).text

    def tag(self, attribute, metadata):
        """Attach the metadata *element* to one of the file's attributes."""
        self.connection.put_metadata(self.name, attribute, metadata)

    def delete(self):
        """Remove this file from the server."""
        self.connection.delete_file(self.name)
|
tubbeg/WSGI_server | run_server.py | <reponame>tubbeg/WSGI_server<filename>run_server.py
import sys
from server import Server
from urllib.request import urlopen
import json
def make_server(server_address, application):
    """Build a Server bound to *server_address* and wire it to the WSGI app.

    The greenlet pool size is fixed at 2000.
    """
    srv = Server(server_address, 2000)
    srv.set_app(application)
    return srv
def main():
    """Load the Flask app from flaskapp.py and serve it on port 80.

    Note: binding port 80 typically requires elevated privileges.
    """
    # Import the WSGI application object named "app" from flaskapp.py.
    module = __import__("flaskapp")
    application = getattr(module, "app")
    host = ''
    port = 80
    server = make_server((host, port), application)
    server.serve_forever()


if __name__ == '__main__':
    main()
|
tubbeg/WSGI_server | server.py | <gh_stars>0
import datetime
import io
import os
import gevent
from gevent import socket
from gevent.pool import Pool
import sys
"""
# WSGI_server
A WSGI server implemented with python and gevent.
###About
Do not use this server in production. Security is pretty much
nonexistent. Also see the Todo list before actually using this
server.
This is not a 100 % pure WSGI server implementation. There
are still a few things missing, but it's good enough to run
web frameworks like Flask or Django.
This implementation uses greenlets since they have I/O centric
scheduling. Python threads are limited by GIL, and processes
are usually expensive. But gevent greenlets yield automatically
when blocking I/O which makes them really good for networking.
###Performance
It's possible to get 400 requests per second using Flask. This
was measured using ApacheBench (https://httpd.apache.org/docs/2.4/programs/ab.html)
## Todo
* Signal handling (SIGINT for instance) for terminating the server.
* Greenlets do not join the server on termination (gevent.joinall)
* Implement 404 return on incorrect request. Currently if the client sends an incorrect
http request line then it will result in an exception on the server
* Return write on start_response. This is still not a pure WSGI server
##More information
This implementation was inspired by:
* https://ruslanspivak.com/lsbaws-part2/
* PEP3333
* http://blog.pythonisito.com/2012/07/introduction-to-gevent.html
* https://itnext.io/build-gunicorn-from-scratch-d75870960b9b
* https://docs.python.org/3/library/wsgiref.html#wsgiref.simple_server.WSGIServer
"""
class Server(object):
    """A minimal WSGI server built on gevent greenlets.

    One greenlet per accepted connection, capped by a gevent Pool.
    """

    def __init__(self, server_address, nr_of_greenlets):
        # Listening socket plus the resolved server name/port for the environ.
        self._socket, self._server_name, self._port = self.__init_socket(server_address)
        # Response state set by start_response() during a request.
        self._headers = None
        self._status = None
        # WSGI application callable, set via set_app().
        self._app = None
        # Per-request parsed request-line fields.
        self._path = None
        self._request_version = None
        # NOTE(review): _request_data is initialised to None and never
        # assigned from the socket, so wsgi.input is always empty -- request
        # bodies (e.g. POST data) are not passed through to the app.
        self._request_data = None
        self._request_method = None
        self._pool = Pool(nr_of_greenlets)

    def __init_socket(self, server_address):
        """Create, bind and listen on the server socket; return it with the
        fully-qualified server name and bound port."""
        server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # IPV4 and TCP
        server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)  # enables reusing the socket
        server_socket.bind(server_address)
        server_socket.listen(5)  # standard queue size
        host, port = server_socket.getsockname()[:2]
        server_name = socket.getfqdn(host)
        return server_socket, server_name, port

    def get_environ(self):
        """Build the WSGI environ dict for the current request from the
        request-line fields parsed in handle_request()."""
        environ = {}
        environ['SERVER_NAME'] = self._server_name
        environ['REQUEST_METHOD'] = self._request_method
        environ['PATH_INFO'] = self._path
        environ['SERVER_PORT'] = str(self._port)
        environ['wsgi.url_scheme'] = 'http'
        environ['wsgi.input'] = io.StringIO(self._request_data)  # creates a string stream
        environ['wsgi.errors'] = sys.stderr
        environ['wsgi.multithread'] = False  # greenlets how will this work?
        environ['wsgi.multiprocess'] = False  # too expensive
        environ['wsgi.run_once'] = False
        environ['wsgi.version'] = (1, 0)
        return environ

    def start_response(self, status, response_headers, exc_info=None):
        """WSGI start_response callable: record the status line and headers,
        adding Date and Server headers.

        NOTE(review): PEP 3333 says start_response should return a write()
        callable; this implementation returns None.
        """
        response = dict((x, y) for x, y in response_headers)
        response['Date'] = datetime.datetime.utcnow().date()
        response['Server'] = 'Server 1.0'
        #print(response)
        self._headers = response
        self._status = status

    def set_app(self, app):
        """Register the WSGI application callable to serve."""
        self._app = app

    def serve_forever(self):
        """Accept connections forever, handling each one in a pooled greenlet."""
        while True:  # <-- this is not good
            # get connection and address
            client_connection, _ = self._socket.accept()
            #self.handle_request(client_connection)
            self._pool.spawn(self.handle_request, client_connection)

    def handle_request(self, client_connection):
        """Read one request, run the WSGI app, and send the response."""
        data = client_connection.recv(1024)
        try:
            parsed_request = self.parse_req(data.decode('utf-8'))
            self._request_method, self._path, self._request_version = parsed_request
        except Exception as e:
            # NOTE(review): sys.exit(1) here terminates the entire server
            # process on a single malformed request; responding with a 4xx
            # and closing the connection would be safer.
            print(e)
            sys.exit(1)
        app_result = self._app(self.get_environ(), self.start_response)
        self.send_response(app_result, client_connection)

    def parse_req(self, request):
        """Split the request line into (method, path, version).

        NOTE(review): str.split()[:3] always yields a list, so the
        isinstance check always succeeds and the Exception branch is
        unreachable; also the Exception is returned, not raised.  An empty
        request instead surfaces as an unpacking error in handle_request().
        """
        result = request.split()[:3]
        if isinstance(result, list):
            return result
        #print(result)
        return Exception("Incorrect http request line")

    def make_response(self, app_result):
        """Assemble the full HTTP response string from the recorded status,
        headers and the app's body iterable.

        NOTE(review): HTTP/1.1 specifies CRLF line endings; os.linesep is
        platform-dependent ("\\n" on Linux), so responses may be
        non-conformant for strict clients.
        """
        status, response_headers = self._status, self._headers
        response = f'HTTP/1.1 {status}{os.linesep}'
        for header in response_headers.items():
            key, val = header
            response = response + f'{key}: {val}{os.linesep}'
        response = response + os.linesep
        for item in app_result:
            response = response + item.decode('utf-8')
        return response

    def send_response(self, app_result, client_connection):
        """Encode and send the response, always closing the connection."""
        try:
            response = self.make_response(app_result)
            #print(response)
            response_bytes = response.encode()
            client_connection.sendall(response_bytes)
        except Exception as e:
            print(e)
        client_connection.close()
cmutnik/world_map | HI_photo_map/make_HI_photo_map.py | #!/usr/bin/python
# <NAME> 200706
# Python script to turn all images in designated dir to thumbnails and plot them on a world map
# varibales in the main() need to be modified accordingly
from PIL import Image
import pandas as pd
import folium
import os
'''
TODO
make sure all images in list exist in both photos and thumbnails dir
maybe leave off thumbnails not in photolist.csv
preserve photo aspect ratio
call lat/long from efix data?
Do polygon markers showup offline but icon markers dont or is it a cache issue?
at bottom of each function, print len of image set used: len(df), len(glob(path_to_thumbnails))
possibly set variables as global, to compare the len with '==':
global lendf = len(df)
'''
def main():
    '''
    Configure the run and build both artifacts: first the thumbnails,
    then the HTML photo map that displays them.
    '''
    # where the full-size photos live
    originals_dir = './photos/originals/'
    # where generated thumbnails get written
    thumbnails_dir = './photos/thumbnails/'
    # output filename of the generated map page
    map_filename = 'HI_photo_map.html'
    # max thumbnail dimension in pixels (also the popup width on the map)
    thumb_size = 500
    # image extensions we will process
    accepted_extensions = ('.jpg', '.JPG', '.png', '.PNG')

    # generate (or reuse) thumbnails for every accepted image
    makeThumbnails(originals_dir, thumbnails_dir, thumb_size, accepted_extensions)
    # place the thumbnails on the interactive map
    makePhotoMap(map_filename, thumb_size, thumbnails_dir)
def makeThumbnails(path_to_original_images, path_for_thumnails, imgsize, extensions):
    '''
    Create a thumbnail (max dimension *imgsize*, aspect ratio preserved by
    Image.thumbnail) for every image in *path_to_original_images* whose
    filename ends with one of *extensions*, writing it to
    *path_for_thumnails*.  Existing thumbnails are left untouched.
    '''
    # make directory for thumbnails, if it doesnt exist
    if not os.path.exists(path_for_thumnails):
        os.makedirs(path_for_thumnails)

    for photos in os.listdir(path_to_original_images):
        # only run loop over files that have correct extensions
        if photos.endswith(extensions):
            outFilename = path_for_thumnails + photos
            # check if thumbnail already exists
            if os.path.isfile(outFilename):
                print('Thumbnail already exists for: ', photos)
            else:
                # open and resize the image; the with-block guarantees the
                # file handle is closed.  Image.LANCZOS replaces
                # Image.ANTIALIAS, which was deprecated and then removed in
                # Pillow 10 (LANCZOS has been its alias since Pillow 2.7).
                with Image.open(path_to_original_images + photos) as img:
                    img.thumbnail([imgsize, imgsize], Image.LANCZOS)
                    # save ouput file
                    img.save(outFilename)
                print('thumbnail made for: ', photos)
        else:
            print('thumbnail not made for: ', photos)
#def makePhotoMap(mapname='./Beth_Jeff_Adventures_thumbnails.html'):
def makePhotoMap(mapname, imgsize, path_for_thumnails):
    '''
    Read image_data.csv and add a map marker (with an image popup) for every
    listed thumbnail, saving the result as *mapname*.

    image_data.csv layout (inferred from the column indexing below):
    first column = image filename, second = latitude, third = longitude.
    '''
    # do for all images in file, not just one image
    df = pd.read_csv('image_data.csv')
    # append thumbnail directory to image names
    df[df.columns[0]] = path_for_thumnails + df[df.columns[0]]
    # make list out of photos
    imgpaths=df[df.columns[0]].to_list()
    # batch coordinates for each image
    #imgcoords=[[df['lat'][i], df['long'][i]] for i in range(len(df))]
    imgcoords=[[df[df.columns[1]][i], df[df.columns[2]][i]] for i in range(len(df))]
    # make list of image descriptions
    #describeimgs=df[df.columns[3]].to_list()

    # centre the map on the first image's coordinates
    m = folium.Map(imgcoords[0], zoom_start=8)

    # one <img> HTML snippet per thumbnail, used as popup content
    testNOloop = [folium.Html('<img src='+imgpaths[i]+'>', script=True) for i in range(len(imgpaths))]

    #####
    # add marker for each popup
    #####
    # add marker (no popup)
    #[folium.Marker(imgcoords[i]).add_to(m) for i in range(len(imgcoords))]
    # use a regular polygon as a marker, with popup
    #[folium.RegularPolygonMarker(location=imgcoords[j], popup=popup1[j],).add_to(m) for j in range(len(imgcoords))]
    # use standard blue marker, with popup
    #popup1 = [folium.Popup(testNOloop[i], max_width=imgsize) for i in range(len(testNOloop))]
    #[folium.Marker(location=imgcoords[j], popup=popup1[j],).add_to(m) for j in range(len(imgcoords))]
    #
    #[folium.RegularPolygonMarker(
    [folium.Marker(
        location=imgcoords[j],
        popup=folium.Popup(testNOloop[j], max_width=imgsize),
        #icon=folium.Icon(color='green')
        icon=folium.Icon(color='red', icon='picture')
        ).add_to(m) for j in range(len(imgcoords))]

    m.save(mapname)
    # NOTE(review): print() returns None, so this function returns None
    # after printing the confirmation message.
    return print(mapname + ' was made')
# call function to make tumbnails and load them onto a map
main() |
cmutnik/world_map | other_modifications/imagerotations/01_rotateimages.py | <gh_stars>0
from PIL import ExifTags, Image
# input/output paths (placeholders; edit before running)
filename = './path/to/inimg.JPG'
output_fname = './outpath/to/outimg.JPG'
img = Image.open(filename)
#print(img._getexif().items())
# map numeric EXIF tag ids to readable names; raises KeyError below if the
# image carries no 'Orientation' tag -- TODO confirm inputs always have EXIF
exif=dict((ExifTags.TAGS[k], v) for k, v in img._getexif().items() if k in ExifTags.TAGS)
# check image orientation
print(exif['Orientation'])
# NOTE(review): valid EXIF orientations are 1-8, so `not exif['Orientation']`
# is only true for 0 -- this branch looks unreachable; intent unclear, verify
if not exif['Orientation']:
    img=img.rotate(90, expand=True)
elif exif['Orientation']== 6:
    img=img.rotate(270, expand=True)
# NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (use Image.LANCZOS)
img.thumbnail((1000,1000), Image.ANTIALIAS)
img.save(output_fname, "JPEG") |
cmutnik/world_map | other_modifications/imagerotations/00_rotateimages.py | <filename>other_modifications/imagerotations/00_rotateimages.py<gh_stars>0
from PIL import Image, ExifTags
# open image (placeholder path; edit before running)
image = Image.open('./path/to/image.jpg')
# use try to only apply code to images with exif data
try:
    # get orientation value from exif data (274 is the EXIF 'Orientation' tag)
    image_exif = image._getexif()
    image_orientation = image_exif[274]
    # rotate images based on orientation value
    if image_orientation == 3:
        rotated = image.rotate(180)
    if image_orientation == 6:
        rotated = image.rotate(-90)
    if image_orientation == 8:
        rotated = image.rotate(90)
    # save image
    # NOTE(review): for orientation 1 (normal) `rotated` is never bound, so
    # this raises NameError, which the bare except silently swallows -- such
    # images are never saved; confirm whether that is intended
    rotated.save('outimg.jpg')
except:
    # deliberate best-effort: images without EXIF (or any failure) are skipped
    pass
|
cmutnik/world_map | other_modifications/geotag_EXIF/exif_from_photo.py | <reponame>cmutnik/world_map<filename>other_modifications/geotag_EXIF/exif_from_photo.py
# https://developer.here.com/blog/getting-started-with-geocoding-exif-image-metadata-in-python3
#image_name = './photos/dad/IMG_0195.JPG'
image_name='photo_map/photos/dad/IMG_0195.JPG'
from PIL import Image
def get_exif(filename):
    """Open *filename*, verify file integrity, and return its raw EXIF dict."""
    img = Image.open(filename)
    img.verify()
    return img._getexif()
#exif = get_exif(image_name)
#print(exif)
from PIL.ExifTags import TAGS
def get_labeled_exif(exif):
    """Map numeric EXIF tag ids to human-readable names via PIL's TAGS table."""
    return {TAGS.get(tag_id): value for (tag_id, value) in exif.items()}
# resolve EXIF ids to readable names for the configured image and show them
exif = get_exif(image_name)
labeled = get_labeled_exif(exif)
print(labeled)
# NOTE: the triple-quoted block below is disabled GPS-extraction code kept
# as a bare string literal so it never executes
"""
from PIL.ExifTags import GPSTAGS
def get_geotagging(exif):
    if not exif:
        raise ValueError("No EXIF metadata found")
    geotagging = {}
    for (idx, tag) in TAGS.items():
        if tag == 'GPSInfo':
            if idx not in exif:
                raise ValueError("No EXIF geotagging found")
            for (key, val) in GPSTAGS.items():
                if key in exif[idx]:
                    geotagging[val] = exif[idx][key]
    return geotagging
exif = get_exif(image_name)
geotags = get_geotagging(exif)
print(geotags)
""" |
cmutnik/world_map | other_modifications/geotag_EXIF/pull_all_exif.py | from PIL import ExifTags, Image
# set input image location (placeholder path; edit before running)
filename = './path/to/inimg.JPG'
# open image
img = Image.open(filename)
#print(img._getexif().items())
# pull all the exif data, keyed by human-readable tag name
# NOTE(review): _getexif() returns None for images without EXIF, which would
# raise TypeError here -- confirm inputs always carry EXIF
exif=dict((ExifTags.TAGS[k], v) for k, v in img._getexif().items() if k in ExifTags.TAGS)
# print all exif data associated with the image
print(exif)
|
cmutnik/world_map | simple_map/makeSimpleHTMLmap.py | import folium
# single hard-coded photo and its (lat, lon) position
img1Path='./photos/IMG_0195.JPG'
img1coords=[42.3730, -73.3677]
m = folium.Map(img1coords, zoom_start=10)
#test = folium.Html('<b>Hello world</b>', script=True)
# NOTE(review): the src attribute is unquoted, so this only works for paths
# without spaces or special characters
test = folium.Html('<img src='+img1Path+' alt="Italian Trulli">', script=True)
# add pin
folium.Marker(img1coords).add_to(m)
# add popup (a polygon marker at the same spot carries the image popup)
popup1 = folium.Popup(test, max_width=2650)
folium.RegularPolygonMarker(
    location=img1coords, popup=popup1,
).add_to(m)
m.save('simpleMap.html') |
cmutnik/world_map | Multi_image_sources/make_Multi_image_sources.py | #!/usr/bin/python
# <NAME> 200706
# Python script to turn all images in designated dir to thumbnails and plot them on a world map
# variables in main() need to be modified accordingly
from PIL import Image
import pandas as pd
import folium
import os
'''
TODO
make sure all images in list exist in both photos and thumbnails dir
maybe leave off thumbnails not in photolist.csv
preserve photo aspect ratio
call lat/long from efix data?
Do polygon markers showup offline but icon markers dont or is it a cache issue?
at bottom of each function, print len of image set used: len(df), len(glob(path_to_thumbnails))
possibly set variables as global, to compare the len with '==':
global lendf = len(df)
'''
def main():
    '''
    Configure paths, accepted image types, thumbnail size and the output
    map name, then build the thumbnails and the photo map.
    '''
    # where the full-size images live and where thumbnails are written
    originals_dir = './photos/originals/'
    thumbnails_dir = './photos/thumbnails/'
    # output HTML map filename
    map_filename = 'Multi_image_sources.html'
    # thumbnail bounding-box size (also the popup max width on the map)
    thumbnail_size = 500
    # accepted image extensions
    accepted_extensions = ('.jpg', '.JPG', '.jpeg', '.png', '.PNG')
    makeThumbnails(originals_dir, thumbnails_dir, thumbnail_size, accepted_extensions)
    makePhotoMap(map_filename, thumbnail_size, thumbnails_dir)
def makeThumbnails(path_to_original_images, path_for_thumnails, imgsize, extensions):
    '''
    Create a correctly-rotated thumbnail for every accepted image in the
    originals directory, skipping files whose thumbnail already exists.
    '''
    # create the thumbnail directory on first run
    if not os.path.exists(path_for_thumnails):
        os.makedirs(path_for_thumnails)
    for photos in os.listdir(path_to_original_images):
        # skip anything that is not an accepted image type
        if not photos.endswith(extensions):
            print('thumbnail not made for: %s' % photos)
            continue
        outFilename = path_for_thumnails + photos
        if os.path.isfile(outFilename):
            print('Thumbnail already exists for: %s ' % photos)
            continue
        img = Image.open(path_to_original_images + photos)
        # undo camera rotation recorded in EXIF tag 274 (Orientation);
        # images without EXIF data (or a missing tag) are left as-is
        try:
            orientation = img._getexif()[274]
            rotations = {3: 180, 6: -90, 8: 90}
            if orientation in rotations:
                img = img.rotate(rotations[orientation])
        except:
            pass
        # once rotated, downscale in place and write the thumbnail
        img.thumbnail([imgsize, imgsize], Image.ANTIALIAS)
        img.save(outFilename)
        print('thumbnail made for: %s' % photos)
#def makePhotoMap(mapname='./Beth_Jeff_Adventures_thumbnails.html'):
def makePhotoMap(mapname, imgsize, path_for_thumnails):
    '''
    Build a world map with one picture-popup marker per image, reading two
    owner-specific CSV image lists and colouring their pins differently.
    Saves the finished map to `mapname`.
    '''
    # initialize map (centred on a fixed location)
    m = folium.Map([42.3730,-73.3677], zoom_start=3, tiles='Stamen Terrain')
    def addimagestomap(df, pincolor):
        # closure over `m`: adds one marker per row of df
        # append thumbnail directory to image names
        # NOTE(review): this mutates the caller's DataFrame in place
        df[df.columns[0]] = path_for_thumnails + df[df.columns[0]]
        # make list out of photos
        imgpaths=df[df.columns[0]].to_list()
        # batch coordinates for each image (columns 1 and 2 are lat/long)
        imgcoords=[[df[df.columns[1]][i], df[df.columns[2]][i]] for i in range(len(df))]
        testNOloop = [folium.Html('<img src='+imgpaths[i]+'>', script=True) for i in range(len(imgpaths))]
        #####
        # add marker for each popup (list comprehension used for side effects)
        #####
        [folium.Marker(
            location=imgcoords[j],
            popup=folium.Popup(testNOloop[j], max_width=imgsize),
            #icon=folium.Icon(color='green')
            icon=folium.Icon(color=pincolor, icon='picture')
            ).add_to(m) for j in range(len(imgcoords))]
        counter = len(imgcoords)
        return print(counter, ' ' + pincolor + ' pins added')
    ################################################################
    ##### Use Different Color Pins to Distinguish Photos Owner #####
    ################################################################
    # do for all images in dukes image file
    df_dukes = pd.read_csv('./photos/dukesPics.csv')
    # add dukes images to map
    addimagestomap(df=df_dukes, pincolor='red')
    # do for all images in takos image file
    df_tako = pd.read_csv('./photos/TakosPics.csv')
    # add takos images to map
    addimagestomap(df=df_tako, pincolor='green')
    # save completed map
    m.save(mapname)
    return print(mapname + ' was made')
# call function to make tumbnails and load them onto a map
main() |
hanss314/higumei-scriptextraction | extract_outfits.py | """
Given a higumei script, collects the names of every asset it references:
character outfits, backgrounds, music tracks, and sound effects.
Useful for building asset inventories / translation glossaries.
Accumulates across all script files passed on the command line, then
prints each category as a newline-separated list.
"""
import csv
from sys import argv, stdout
from base_processor import BaseScriptProcessor
# global registries: every processed script adds the asset names it uses here
outfits = set()
bgs = set()
bgm = set()
ses = set()
shaders = set()
class ExtractLines(BaseScriptProcessor):
    """Script processor that records every referenced asset name (outfits,
    backgrounds, music, sound effects, shaders) into the module-level sets."""

    def __init__(self, filename):
        super().__init__()
        self.filename = filename
        self.linecount = 0
        self.rows = []

    def charaload(self, raw, cmd, chara):
        """Record a character sprite / outfit load."""
        outfits.add(chara)

    def se2(self, raw, cmd, sound, _=0):
        """Record a sound effect."""
        ses.add(sound)

    def background(self, raw, cmd, bg_name):
        """Record a background image."""
        bgs.add(bg_name)

    def bgm(self, raw, cmd, track):
        # note: `bgm` here resolves to the module-level set, not this method
        bgm.add(track)

    def shader(self, raw, cmd, shader_name):
        """Record a shader."""
        shaders.add(shader_name)

    def run(self):
        """Process the configured script file, populating the global sets."""
        self.process_file(self.filename)
if __name__ == '__main__':
    # process every script passed on the command line (accumulating into the
    # module-level sets), then report each asset category
    for arg in argv[1:]:
        ExtractLines(arg).run()
    print('Outfits:')
    print('\n'.join(outfits))
    print('\nBackgrounds:')
    print('\n'.join(bgs))
    print('\nMusic:')
    print('\n'.join(bgm))
    print('\nSound Effects:')
    print('\n'.join(ses))
    # print('\n'.join(shaders))
|
hanss314/higumei-scriptextraction | extract_lines.py | <reponame>hanss314/higumei-scriptextraction<gh_stars>0
"""
Given a higumei script, extracts all the lines containing text which is shown to the player.
Useful for translation.
Also prefixes each line with the command index, for integrating the translated text back into the commands.
Outputs CSV on stdout with a header row and one row per player-visible
line, in the columns:
    index, chara, jp, en
where `index` is the command index (for integrating translated text back
into the commands), `chara` is the translated speaker tag, `jp` is the
original text, and `en` is left for the translator to fill in.
"""
import csv
from sys import argv, stdout
from base_processor import BaseScriptProcessor
tl_dict = {
    "千雨": "Chisame",
    "一穂": "Kazuho",
    "梨花": "Rika",
    "羽入": "Hanyuu",
    "魅音": "Mion",
    "美雪": "Miyuki",
    "沙都子": "Satoko",
    "悟史": "Satoshi",
    "夏美": "Natsumi",
    "レナ": "Rena",
    "菜央": "Nao",
    '私服': 'Casual',
    # To be completed
}

def tl_name(name):
    """Translate a 'speaker[:outfit]' tag to English.

    Returns '' for the bare ':' tag, 'Name' when no outfit is given, and
    'Name (Outfit)' otherwise. Unknown names/outfits pass through untouched.

    Fix: the original `name, outfit = name.split(':')` raised ValueError for
    tags containing more than one colon; partition() keeps everything after
    the first colon as the outfit instead.
    """
    if name == ':':
        return ''
    base, sep, outfit = name.partition(':')
    base = tl_dict.get(base, base)
    if not sep:
        # no outfit component at all
        return f'{base}'
    outfit = tl_dict.get(outfit, outfit)
    return f'{base} ({outfit})'
class ExtractLines(BaseScriptProcessor):
    """Collects every player-visible line as (command index, speaker, text)
    and dumps the collection as CSV on stdout."""

    def __init__(self, filename):
        super().__init__()
        self.filename = filename
        self.linecount = 0
        self.rows = []

    def showtext(self, chara, text):
        """Record one spoken line, tagged with its command index."""
        self.rows.append((self.linecount, tl_name(chara), text))
        self.linecount += 1

    def handle_default(self, cmd, raw):
        # non-text commands still advance the command index
        self.linecount += 1

    def run(self):
        """Process the configured script file, filling self.rows."""
        self.process_file(self.filename)

    def print(self):
        """Write a header plus all collected rows as CSV to stdout."""
        out = csv.writer(stdout)
        out.writerow(('index', 'chara', 'jp', 'en'))
        for record in self.rows:
            out.writerow(record)
if __name__ == '__main__':
    # echo the input filename, then dump the extracted lines as CSV
    print(argv[1])
    e = ExtractLines(argv[1])
    e.run()
    e.print()
|
hanss314/higumei-scriptextraction | base_processor.py | import json
import sys
from inspect import getcallargs
class BaseScriptProcessor:
    """
    Base dispatcher for higumei script commands: subclasses override the
    handler methods they care about; everything else falls through to
    handle_default.

    General handler functions. Override these in your subclass.
    Parameter names
    ----------
    raw: Raw command, as a dictionary
    cmd: name of command, either cmd0 or cmd1 in the raw command
    motion: some type of motion, value is usually in japanese.
    filename: not actually a filename, but a name referring to a resource to be used by the command
    zoom: I don't actually know what this does, this is a guess
    color: Probably a color, only value I've seen for this is 黑
    chara: A character sprite, probably
    position: usually left, right, centre etc. I think
    a,b,c,d,x,y: No idea what these do.
    sometimes ints can be RANDOM, DOWN, UP or something else like that
    set_var takes two variable names, a target and a source.
    it sets the value of the target to the value of the source with an offset (I assume)
    """
    # NOTE: the type annotations below are not just documentation -- call_func
    # uses them to coerce raw string arguments (e.g. to int) before dispatch
    def shakeset(self, raw, cmd, motion, a:int, b:int, c:int, d:int): self.handle_default(raw, cmd)
    def zoom(self, raw, cmd, zoom:float, position, x:int, y:int=0): self.handle_default(raw, cmd)
    def charaload(self, raw, cmd, chara): self.handle_default(raw, cmd)
    def set_var(self, raw, cmd, name, value, shift:int=0): self.handle_default(raw, cmd)
    def background(self, raw, cmd, filename): self.handle_default(raw, cmd)
    def bgm(self, raw, cmd, filename, time:int=-1): self.handle_default(raw, cmd)
    def fadein(self, raw, cmd, speed:int): self.handle_default(raw, cmd)
    def motion(self, raw, cmd, chara, motion, position=None, a:int=0): self.handle_default(raw, cmd)
    def hide(self, raw, cmd, chara, speed:int=0): self.handle_default(raw, cmd)
    def chara(self, raw, cmd, chara, motion, position, x:int, y:int): self.handle_default(raw, cmd)
    def fadeout(self, raw, cmd, color, speed:int): self.handle_default(raw, cmd)
    def shakedisp(self, raw, cmd, motion): self.handle_default(raw, cmd)
    def shakechara(self, raw, cmd, chara, motion): self.handle_default(raw, cmd)
    def se2(self, raw, cmd, filename, t=None): self.handle_default(raw, cmd)
    def wait(self, raw, cmd, time:int): self.handle_default(raw, cmd)
    def serifclose(self, raw, cmd): self.handle_default(raw, cmd)
    def move(self, raw, cmd, chara, position, x, y): self.handle_default(raw, cmd)
    def bgmstop(self, raw, cmd, a:int=0): self.handle_default(raw, cmd)
    def wipeout(self, raw, cmd): self.handle_default(raw, cmd)
    def wipein(self, raw, cmd): self.handle_default(raw, cmd)
    def shader(self, raw, cmd, filename, a:int = 0): self.handle_default(raw, cmd)
    def setdispname(self, raw, cmd, name, chara): self.handle_default(raw, cmd)
    def removedispname(self, raw, cmd, name): self.handle_default(raw, cmd)
    def effect(self, raw, cmd, filename): self.handle_default(raw, cmd)
    def voice(self, raw, cmd, filename): self.handle_default(raw, cmd)

    def handle_default(self, raw, cmd):
        # the early return deliberately disables the debug print below
        return
        print(f'No handler for {cmd} falling back to default.', file=sys.stderr)

    def showtext(self, chara, text):
        """chara says text"""
        # NOTE(review): arguments here are ('showtext', '') i.e. (raw, cmd)
        # swapped relative to handle_default's signature -- harmless since
        # handle_default ignores both, but worth confirming
        self.handle_default('showtext', '')

    def __init__(self):
        # command-name -> handler dispatch table (some names are Japanese)
        self.command_dict = {
            'charaload': self.charaload,
            '変数': self.set_var,
            'shakeset': self.shakeset,
            '背景': self.background,
            'bgm2': self.bgm,
            'bgm': self.bgm,
            'fadein': self.fadein,
            'motion': self.motion,
            'hide': self.hide,
            'chara': self.chara,
            'fadeout': self.fadeout,
            'zoom': self.zoom,
            'shakedisp': self.shakedisp,
            'shakechara': self.shakechara,
            'se2': self.se2,
            'wait': self.wait,
            'serifclose': self.serifclose,
            'move': self.move,
            'bgmstop': self.bgmstop,
            'wipeout': self.wipeout,
            'wipein': self.wipein,
            'shader': self.shader,
            'setdispname': self.setdispname,
            'removedispname': self.removedispname,
            'effect': self.effect,
            'voice': self.voice,
        }

    def process_list(self, cmds: list):
        """Dispatch every command dict in `cmds` in order."""
        # `seen` makes the handler-scaffolding printer in process_command
        # emit each unknown command only once per run
        self.seen = set()
        for command in cmds:
            self.process_command(command)

    @staticmethod
    def extract_cmd(command):
        """Return (command name, 0 or 1) from the 'cmd0'/'cmd1' key."""
        if 'cmd0' in command:
            cmd = command['cmd0']
            cmd_num = 0
        elif 'cmd1' in command:
            cmd = command['cmd1']
            cmd_num = 1
        else:
            raise ValueError('Command not found')
        return cmd, cmd_num

    @staticmethod
    def get_arg_list(command):
        """Collect arg0..arg9 values in order (missing slots are skipped)."""
        args = []
        for i in range(10):
            if f'arg{i}' in command:
                args.append(command[f'arg{i}'])
        return args

    def call_func(self, f, *args):
        """Call handler `f`, coercing each argument via f's annotations
        (e.g. int('3')); coercion failures leave the raw value in place."""
        callargs = getcallargs(f, *args)
        del callargs['self']
        for k in callargs:
            if k in f.__annotations__:
                try: callargs[k] = f.__annotations__[k](callargs[k])
                except ValueError: pass
        return f(**callargs)

    def process_command(self, command):
        """Route one raw command dict to showtext, a handler, or the default.

        A ':' in the command name marks a speaker tag (dialogue), as does a
        command carrying arg1 but no arg0.
        """
        cmd, cmd_num = self.extract_cmd(command)
        if ':' in cmd:
            self.showtext(cmd, command['arg1'])
        elif cmd in self.command_dict:
            self.call_func(self.command_dict[cmd], command, cmd, *self.get_arg_list(command))
        elif 'arg1' in command and 'arg0' not in command:
            self.showtext(cmd, command['arg1'])
        elif cmd not in self.seen:
            # developer aid: print a ready-to-paste handler stub for any
            # command we have no handler for (once per command name)
            self.seen.add(cmd)
            print(f'def {cmd}(self, raw, cmd, {", ".join(self.get_arg_list(command))}): self.handle_default(raw, cmd)')
        else:
            self.handle_default(command, cmd)

    def process_file(self, filename: str):
        """Load a JSON script file and process its 'scr' command list."""
        with open(filename, 'rb') as json_file:
            data = json.load(json_file)
        if 'scr' not in data:
            raise ValueError('Expected "scr" field in json')
        self.process_list(data['scr'])
if __name__ == '__main__':
    # smoke-test: run the default processor over each file; on the first
    # failure print the offending filename and stop
    for f in sys.argv[1:]:
        try:
            BaseScriptProcessor().process_file(f)
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt
            print(f)
            break
|
Maxz44/euler | euler.py | #!/usr/bin/env python3
# --- Euler pb 1
# Multiples of 3 and 5
# If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9.
# The sum of these multiples is 23.
# Find the sum of all the multiples of 3 or 5 below 1000.
def euler1(n):
    """Return the sum of all natural numbers below n divisible by 3 or 5."""
    total = 0
    for value in range(1, n):
        if value % 3 == 0 or value % 5 == 0:
            total += value
    return total
# euler1(1000)
# --- Euler pb 2
# Even Fibonacci numbers
# Each new term in the Fibonacci sequence is generated by adding the previous two terms.
# By starting with 1 and 2, the first 10 terms will be:
#
# 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
#
# By considering the terms in the Fibonacci sequence whose values do not exceed four million,
# find the sum of the even-valued terms.
def fib(n):
    """Return the first n Fibonacci numbers (1, 1, 2, 3, ...) as a list.

    Fixes in this revision: the original returned the bare int 1 for
    1 <= n <= 3 (so fib(3) was wrong and the return type was inconsistent
    with the n > 3 list), and merely printed a message for n < 1; now a
    list is always returned and invalid n raises ValueError.
    """
    if n < 1:
        raise ValueError('n must be a positive integer')
    seq = [1, 1]
    while len(seq) < n:
        seq.append(seq[-1] + seq[-2])
    # slice handles n == 1, where the [1, 1] seed is one element too long
    return seq[:n]
def euler2(n):
    """Sum of the even Fibonacci terms whose values do not exceed n.

    Fix: the original summed every even term generated, including the one
    term the loop appends past the limit (e.g. euler2(7) returned 10
    because 8 was counted); terms are now filtered to <= n.
    """
    terms = [1, 2]
    while terms[-1] < n:
        terms.append(terms[-1] + terms[-2])
    return sum(t for t in terms if t <= n and t % 2 == 0)
# print(euler2(4000000))
# --- Euler pb 3
# Largest prime factor
# The prime factors of 13195 are 5, 7, 13 and 29.
#
# What is the largest prime factor of the number 600851475143 ?
def isprime(n):
    """Return True if n is prime.

    Fixes: the original returned False for 2 (its only even prime) and
    True for 1; it also crashed on n < 0 via sqrt of a negative number.
    Trial division by 2 then odd candidates up to sqrt(n).
    """
    from math import sqrt
    if n < 2:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    for i in range(3, int(sqrt(n)) + 1, 2):
        if n % i == 0:
            return False
    return True
def euler3(n):
    """Return the largest prime factor of n (n >= 2).

    Trial division with factor removal. Fixes two defects in the original:
    range(3, int(sqrt(n))) missed factors at exactly sqrt(n) (euler3(25)
    returned 1) and a prime n returned 1 instead of n. Also no longer
    depends on isprime(): dividing factors out as they are found means
    every surviving divisor is automatically prime.
    """
    largest = 1
    factor = 2
    while factor * factor <= n:
        if n % factor == 0:
            largest = factor
            while n % factor == 0:
                n //= factor
        factor += 1
    if n > 1:
        # whatever remains after removing all small factors is prime
        largest = n
    return largest
# print(euler3(600851475143))
# --- Euler pb 4
# A palindromic number reads the same both ways. The largest palindrome made
# from the product of two 2-digit numbers is 9009 = 91 × 99.
# Find the largest palindrome made from the product of two 3-digit numbers.
def nb_is_palindrom(n):
    """True if the decimal representation of n reads the same reversed."""
    digits = str(n)
    return digits == ''.join(reversed(digits))
def euler4(n):
    """Largest palindrome that is a product of two n-digit numbers.

    Same result as the original, but without materializing the full set of
    all products: pairs are scanned once with j >= i (products are
    symmetric) and the palindrome test only runs on candidates that beat
    the current best.
    """
    lo, hi = 10 ** (n - 1), 10 ** n
    best = 0
    for i in range(lo, hi):
        for j in range(i, hi):
            prod = i * j
            if prod > best:
                s = str(prod)
                if s == s[::-1]:
                    best = prod
    return best
# print(euler4(3))
# --- Euler pb 5
# Smallest multiple
# 2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.
# What is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?
def is_divisible_range(n, start, end):
    """True if n is divisible by every integer in [start, end] (inclusive).

    all() short-circuits on the first non-divisor; the original always
    scanned the whole range even after finding a failure.
    """
    return all(n % i == 0 for i in range(start, end + 1))
def euler5(start, end):
    """Smallest positive integer divisible by every number in [start, end].

    Computed as the running lcm via gcd instead of the original
    brute-force scan (which was unusably slow for (1, 20) and also never
    tested candidates <= end + 1, so e.g. euler5(1, 2) returned 4).
    Assumes start >= 1.
    """
    from math import gcd
    from functools import reduce
    return reduce(lambda acc, k: acc * k // gcd(acc, k), range(start, end + 1), 1)
# print(euler5(1, 20))
# --- Euler 6
# Sum square difference
# The sum of the squares of the first ten natural numbers is,
# 1**2 + 2**2 + ... + 10**2 = 385
# The square of the sum of the first ten natural numbers is,
# (1 + 2 + ... + 10)**2 = 55**2 = 3025
# Hence the difference between the sum of the squares of the first ten natural numbers and the square of the sum is
# 3025 - 385 = 2640
# Find the difference between the sum of the squares of the first one hundred natural numbers and the square of the sum.
def euler6(n):
    """Difference between (sum of 1..n)^2 and the sum of the squares 1..n."""
    nums = range(1, n + 1)
    square_of_sum = sum(nums) ** 2
    sum_of_squares = sum(i * i for i in nums)
    return square_of_sum - sum_of_squares
# print(euler6(100))
# --- Euler 7
# By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.
# What is the 10 001st prime number?
def euler7(n):
    """Return the n-th prime number (euler7(6) == 13)."""
    candidate = 2   # number under test; 2 is counted as the first prime
    found = 1       # how many primes found so far
    while found < n:
        candidate += 1
        if isprime(candidate):
            found += 1
    return candidate
# print(euler7(10_001))
# --- Euler 8
# https://projecteuler.net/problem=8
def euler8(n):
    """Greatest product of n adjacent digits in the fixed 1000-digit series."""
    nb_series = ''.join((
        "73167176531330624919225119674426574742355349194934",
        "96983520312774506326239578318016984801869478851843",
        "85861560789112949495459501737958331952853208805511",
        "12540698747158523863050715693290963295227443043557",
        "66896648950445244523161731856403098711121722383113",
        "62229893423380308135336276614282806444486645238749",
        "30358907296290491560440772390713810515859307960866",
        "70172427121883998797908792274921901699720888093776",
        "65727333001053367881220235421809751254540594752243",
        "52584907711670556013604839586446706324415722155397",
        "53697817977846174064955149290862569321978468622482",
        "83972241375657056057490261407972968652414535100474",
        "82166370484403199890008895243450658541227588666881",
        "16427171479924442928230863465674813919123162824586",
        "17866458359124566529476545682848912883142607690042",
        "24219022671055626321111109370544217506941658960408",
        "07198403850962455444362981230987879927244284909188",
        "84580156166097919133875499200524063689912560717606",
        "05886116467109405077541002256983155200055935729725",
        "71636269561882670428252483600823257530420752963450",
    ))
    best = 0
    # slide a window of n digits across the series, tracking the max product
    for start in range(len(nb_series) - n + 1):
        product = 1
        for digit in nb_series[start:start + n]:
            product *= int(digit)
        if product > best:
            best = product
    return best
# print(euler8(13)) |
amoodie/StratGAN | StratGAN/utils.py | import numpy as np
import os, sys
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import string
import random
import json
class Config:
    """
    Empty attribute container ("bag of settings"): callers attach
    hyperparameters dynamically while building the GAN/painter.
    """
    pass
def sample_Z(m, n):
    """Draw an (m, n) latent-noise matrix from Uniform(-1, 1)."""
    return np.random.uniform(low=-1., high=1., size=(m, n))
def plot_images(images, n_categories, image_dim=None, labels=None):
    """Plot a grid of flattened square grayscale images, one row group per
    category, each tile annotated with its label. Returns the figure.

    Fixes: np.sqrt returns a float, which reshape() rejects, so the
    inferred image_dim is now cast to int; labels=None previously crashed
    zip() and now falls back to empty labels.
    """
    if not image_dim:
        image_dim = int(np.sqrt(images.shape[1]))
    if labels is None:
        labels = [''] * images.shape[0]
    # grid image dimensions: one row per category
    gd = (n_categories, images.shape[0] // n_categories)
    fig = plt.figure(figsize=(gd[1], gd[0]))
    gs = gridspec.GridSpec(gd[0], gd[1])
    gs.update(wspace=0.05, hspace=0.05)
    for i, (image, label) in enumerate(zip(images, labels)):
        ax = plt.subplot(gs[i])
        # label badge in the tile's upper-right corner
        ax.text(0.8, 0.8, str(label),
                backgroundcolor='white', transform=ax.transAxes)
        plt.axis('off')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_aspect('equal')
        plt.imshow(image.reshape(image_dim, image_dim), cmap='Greys_r')
    return fig
def mkdirs(folder_list):
    """Create every folder in folder_list (including parents) if missing.

    exist_ok avoids the check-then-create race of the original
    exists()/makedirs() pair.
    """
    for folder in iter(folder_list):
        os.makedirs(folder, exist_ok=True)
def training_sample_set(z_dim, n_labels):
    """Return a fixed evaluation batch (zs, labels).

    zs is a (n_labels*10, z_dim) float32 uniform(-1, 1) latent matrix and
    labels is the matching one-hot matrix with 10 consecutive samples per
    category.

    Fix: `idx.astype(np.int)` crashed on NumPy >= 1.24 (the np.int alias
    was removed); the builtin int dtype is used instead, and the manual
    index-filling loop is replaced by np.repeat.
    """
    n_samples = 10  # samples per label
    zs = np.random.uniform(-1, 1, [n_labels * n_samples, z_dim]).astype(np.float32)
    # category index for each row: 0 x10, 1 x10, ...
    idx = np.repeat(np.arange(n_labels), n_samples)
    labels = np.zeros((n_samples * n_labels, n_labels))
    labels[np.arange(n_samples * n_labels), idx.astype(int)] = 1
    return zs, labels
def rand_id(size=8, chars=string.ascii_uppercase + string.digits):
    """Return a random identifier of `size` characters drawn from `chars`.

    (Idea from the classic Stack Overflow random-string recipe, q. 2257441.)
    """
    picks = [random.choice(chars) for _ in range(size)]
    return ''.join(picks)
def write_config(model):
    """Serialize the attributes of model.config to config.json inside the
    model's training log directory."""
    config_path = os.path.join(model.train_log_dir, 'config.json')
    with open(config_path, 'w') as fp:
        json.dump(vars(model.config), fp, sort_keys=True, indent=4)
def label_maker(_label, n_categories):
    """make a label (not necessarily a one hot) for evals

    NOTE(review): incomplete stub -- `label` is allocated but never filled
    or returned, so this always returns None; the list branch was meant to
    unpack _label into the label vector.
    """
    label = np.zeros((1, n_categories))
    if isinstance(_label, (list)):
        pass
        # this is where I would unpack into something meaningful
    else:
        # no-op placeholder
        _label = _label
def post_sampler():
    # NOTE(review): broken as written -- this is a module-level function but
    # it references `self` (self.sess/self.G/self.z/self.y/self.is_training),
    # so calling it raises NameError. It appears to be a StratGAN method
    # lifted out of its class and kept for reference: it samples 10 patches
    # for the first label and saves each as post/NNNN.{eps,png}.
    for i in np.arange(0, 10):
        patch = self.sess.run(self.G, feed_dict={self.z: np.random.uniform(-1, 1, [1, self.config.z_dim]).astype(np.float32),
                                                 self.y: np.array([[1, 0, 0, 0, 0, 0]]),
                                                 self.is_training: False})
        fig, ax = plt.subplots()
        ax.imshow(patch.squeeze(), cmap='gray')
        # plt.axis('off')
        # hide every tick and tick-label so only the generated patch shows
        ax.tick_params(
            axis='both',       # changes apply to both axes
            which='both',      # both major and minor ticks are affected
            bottom=False,
            top=False,
            right=False,
            left=False,
            labelleft=False,
            labelbottom=False)
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_aspect('equal')
        plt.savefig('post/%04d.eps' % i, bbox_inches='tight', format='eps', dpi=200)
        plt.savefig('post/%04d.png' % i, bbox_inches='tight', dpi=200)
        plt.close()
plt.close() |
amoodie/StratGAN | StratGAN/paint.py | <reponame>amoodie/StratGAN
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import sys
import numpy as np
from random import randint
import tensorflow as tf
import os
import abc
"""
A fair number of the algorithm's in this module are taken from:
https://github.com/afrozalm/Patch-Based-Texture-Synthesis
Which did not carry a license at the time of use.
"""
class CanvasPainter(object):
"""
defines all methods for quilting,
superclasses just overwrite the next_patch method
"""
def __init__(self, stratgan, paint_label=None,
             canvas_width=1000, canvas_height=None,
             patch_overlap=24, batch_dim=1):
    """Set up the painting canvas, label one-hot, and patch grid.

    stratgan supplies the TF session, config, and output directories;
    paint_label selects which category to paint (default 0);
    canvas_height defaults to canvas_width / 4.
    """
    print(" [*] Building painter...")
    # NOTE(review): Python-2 style metaclass assignment inside __init__ has
    # no effect in Python 3; abstractness is not actually enforced here
    __metaclass__ = abc.ABCMeta
    self.sess = stratgan.sess
    self.stratgan = stratgan
    self.config = stratgan.config
    self.paint_samp_dir = self.stratgan.paint_samp_dir
    self.out_data_dir = self.stratgan.out_data_dir
    self.batch_dim = batch_dim
    # condition is true only when paint_label is falsy AND not the int 0
    # (0 is a valid label and must take the else branch)
    if not paint_label == 0 and not paint_label:
        print('Label not given for painting, assuming zero for label')
        self.paint_label = np.zeros((self.batch_dim, stratgan.data.n_categories))
        self.paint_label[:, 0] = 1
        self.paint_label_int = 0
    else:
        # label = tf.one_hot(label, self.config.n_categories)
        self.paint_label = np.zeros((self.batch_dim, stratgan.data.n_categories))
        self.paint_label[:, paint_label] = 1
        self.paint_label_int = paint_label
    # dump the input canvas size etc into fields
    self.canvas_width = canvas_width
    if not canvas_height:
        self.canvas_height = int(canvas_width / 4)
    else:
        self.canvas_height = canvas_height
    self.patch_overlap = patch_overlap
    # patches are square with side h_dim (the generator's output size)
    self.patch_height = self.patch_width = self.config.h_dim
    self.patch_numel = self.patch_height * self.patch_width
    # generate the list of patch coordinates
    self.patch_xcoords, self.patch_ycoords = self.calculate_patch_coords()
    self.patch_count = self.patch_xcoords.size
    # cull down the canvas size to match (orphan boundaries)
    self.canvas_width = self.patch_xcoords[-1] + self.patch_width
    self.canvas_height = self.patch_ycoords[-1] + self.patch_height
    self.canvas = np.ones((self.canvas_height, self.canvas_width))
    self.target_canvas = 0.5 * np.ones((self.canvas_height, self.canvas_width))
    self.quilted_canvas = np.zeros((self.canvas_height, self.canvas_width), dtype=bool)
    # by default there is no ground truth objects
    # self.groundtruth_type = None
def calculate_patch_coords(self):
"""
calculate location for patches to begin, currently ignores mod() patches
"""
w = np.hstack((np.array([0]), np.arange(self.patch_width-self.patch_overlap,
self.canvas_width-self.patch_overlap,
self.patch_width-self.patch_overlap)[:-1]))
h = np.hstack((np.array([0]), np.arange(self.patch_height-self.patch_overlap,
self.canvas_height-self.patch_overlap,
self.patch_height-self.patch_overlap)[:-1]))
xm, ym = np.meshgrid(w, h)
x = xm.flatten()
y = ym.flatten()
return x, y
def add_next_patch(self, calculate_mcb=True):
    """
    Generate the next patch, compute its overlap error against the canvas,
    optionally find the minimum-cost boundary, and quilt it in place.
    Advances self.patch_i. calculate_mcb=False is used for the first patch,
    which has nothing to blend against.
    """
    self.patch_xcoord_i = self.patch_xcoords[self.patch_i]
    self.patch_ycoord_i = self.patch_ycoords[self.patch_i]
    self.patch_coords_i = (self.patch_xcoord_i, self.patch_ycoord_i)
    next_patch = self.generate_next_patch()
    _, patch_error_surf = self.calculate_patch_error_surf(next_patch)
    # calculate the minimum cost boundary
    if calculate_mcb:
        mcb = self.calculate_min_cost_boundary(patch_error_surf)
    else:
        mcb = None
    # then quilt it (quilt_patch is defined later in this class, outside
    # this excerpt)
    self.quilt_patch(self.patch_coords_i, next_patch, mcb)
    self.patch_i += 1
def fill_canvas(self):
    """Quilt patches across the whole canvas, printing a progress bar and
    saving an intermediate snapshot every 20 patches."""
    # generate a random sample for the first patch and quilt into image
    # first_patch = self.generate_next_patch()
    # # quilt into the first coord spot
    # self.patch_coords_i = (self.patch_xcoords[self.patch_i], self.patch_ycoords[self.patch_i])
    # self.quilt_patch(self.patch_coords_i, first_patch, mcb=None)
    print("filling")
    self.patch_i = 0
    # first patch has no neighbours, so skip the min-cost boundary
    self.add_next_patch(calculate_mcb=False)
    # main routine to fill out the remainder of the quilt
    while self.patch_i < self.patch_count:
        self.add_next_patch()
        sys.stdout.write(" [%-20s] %-3d%% | [%02d]/[%d] patches\n" %
                         ('='*int((self.patch_i*20/self.patch_count)), int(self.patch_i/self.patch_count*100),
                          self.patch_i, self.patch_count))
        if self.patch_i % 20 == 0:
            # periodic snapshot of the partially-filled canvas
            samp = plt.imshow(self.canvas, cmap='gray')
            plt.savefig(os.path.join(self.paint_samp_dir, '%04d.png' % self.patch_i), dpi=600, bbox_inches='tight')
            plt.close()
    sys.stdout.write(" [%-20s] %-3d%% | [%02d]/[%d] patches\n" %
                     ('='*int((self.patch_i*20/self.patch_count)), int(self.patch_i/self.patch_count*100),
                      self.patch_i, self.patch_count))
@abc.abstractmethod
def generate_next_patch(self, **kwargs):
    """
    Produce the next candidate patch to quilt.

    Abstract: concrete painter subclasses must implement this.
    """
    pass
def add_groundtruth(self, groundtruth):
if groundtruth.canvas.shape != self.canvas.shape:
RuntimeError('ground truth must have common shape with canvas')
self.groundtruth_obj = groundtruth
self.groundtruth_canvas = np.copy(groundtruth.canvas)
self.groundtruth_canvas_overlay = np.copy(groundtruth.canvas_overlay)
self.groundtruth_type = groundtruth.type
self.groundtruth = True
# error surface calculations:
# ----------------------------
def calculate_patch_error_surf(self, next_patch):
    """Overlap error between next_patch and the already-quilted canvas.

    Left-column patches only have a top (horizontal) overlap, top-row
    patches only a left (vertical) one; interior patches get both, stacked
    on axis 0 of the returned arrays.
    """
    if self.patch_xcoord_i == 0:
        # a left-side patch, only calculate horizontal
        e, e_surf = self.patch_overlap_error_horizntl(next_patch)
    elif self.patch_ycoord_i == 0:
        # a top-side patch, only calculate vertical
        e, e_surf = self.patch_overlap_error_vertical(next_patch)
    else:
        # a center patch, calculate both; only the overlap strips of the
        # full-size e_surf planes are filled, the rest stays zero
        e = np.zeros((2, 1))
        e_surf = np.zeros((2, next_patch.shape[0], next_patch.shape[1]))
        e[0], e_surf[0, 0:self.patch_overlap, :] = self.patch_overlap_error_horizntl(next_patch)
        e[1], e_surf[1, :, 0:self.patch_overlap] = self.patch_overlap_error_vertical(next_patch)
    return e, e_surf
def patch_overlap_error_vertical(self, next_patch):
canvas_overlaped = self.canvas[self.patch_ycoord_i:self.patch_ycoord_i+self.patch_height,
self.patch_xcoord_i:self.patch_xcoord_i+self.patch_overlap]
patch_overlaped = next_patch[:, 0:self.patch_overlap]
ev = np.linalg.norm(canvas_overlaped - patch_overlaped)
ev_surf = (canvas_overlaped - patch_overlaped)**2
return ev, ev_surf
def patch_overlap_error_horizntl(self, next_patch):
canvas_overlaped = self.canvas[self.patch_ycoord_i:self.patch_ycoord_i+self.patch_overlap,
self.patch_xcoord_i:self.patch_xcoord_i+self.patch_width]
patch_overlaped = next_patch[0:self.patch_overlap, :]
eh = np.linalg.norm(canvas_overlaped - patch_overlaped)
eh_surf = (canvas_overlaped - patch_overlaped)**2
return eh, eh_surf
# min cost boundary functions:
# ----------------------------
def calculate_min_cost_boundary(self, patch_error_surf):
    """Trace the minimum-cost seam(s) through an overlap error surface.

    Edge patches have one overlapping side and return that side's seam
    directly.  Interior patches return a (2, n) int8 array with the
    horizontal seam in row 0 and the vertical seam in row 1.
    """
    if self.patch_xcoord_i == 0:
        # left-edge patch: only the horizontal seam exists
        return self.min_cost_path_horizntl(patch_error_surf)
    if self.patch_ycoord_i == 0:
        # top-edge patch: only the vertical seam exists
        return self.min_cost_path_vertical(patch_error_surf)
    # interior patch: one seam per overlapping side
    assert patch_error_surf.shape[1] == patch_error_surf.shape[2]  # square patches only
    seams = np.zeros((2, patch_error_surf.shape[1]), np.int8)
    seams[0, :] = self.min_cost_path_horizntl(patch_error_surf[0, :self.patch_overlap, :])
    seams[1, :] = self.min_cost_path_vertical(patch_error_surf[1, :, :self.patch_overlap])
    return seams
def min_cost_path_vertical(self, patch_error_surf):
    """Dynamic-programming minimum-cost vertical seam through the left overlap.

    Each row's cell accumulates the cheapest reachable cost from the three
    neighboring cells in the row above; the seam is then traced back from
    the cheapest bottom-row cell.

    Args:
        patch_error_surf: (patch_height, patch_overlap) error surface.
            NOTE: accumulated IN PLACE — the caller's array is mutated.

    Returns:
        ndarray of int, length ``patch_height``: the seam column per row.

    Fixes: ``np.int`` (deprecated NumPy 1.20, removed 1.24) replaced with
    builtin ``int``; a one-column overlap previously crashed on the ``j+1``
    lookup and is now handled as a forced seam.
    """
    mcb = np.zeros((self.patch_height), int)  # mincostboundary
    if self.patch_overlap == 1:
        # degenerate one-column overlap: the seam is forced to column 0
        return mcb
    holder = np.zeros((self.patch_height, self.patch_overlap), int)  # backpointers
    for i in np.arange(1, self.patch_height):
        # for the height of the patch
        for j in np.arange(self.patch_overlap):
            # for each col in overlap: pick the cheapest predecessor above
            if j == 0:
                # first col in row: predecessor is j or j+1
                holder[i,j] = j if patch_error_surf[i-1,j] < patch_error_surf[i-1,j+1] else j+1
            elif j == self.patch_overlap - 1:
                # last col in row: predecessor is j or j-1
                holder[i,j] = j if patch_error_surf[i-1,j] < patch_error_surf[i-1,j-1] else j-1
            else:
                # center cols: min over j-1, j, j+1
                curr_min = j if patch_error_surf[i-1,j] < patch_error_surf[i-1,j-1] else j-1
                holder[i,j] = curr_min if patch_error_surf[i-1,curr_min] < patch_error_surf[i-1,j+1] else j+1
            patch_error_surf[i,j] += patch_error_surf[i-1, holder[i,j]]
    # cheapest end point in the bottom row ...
    min_idx = 0
    for j in np.arange(1, self.patch_overlap):
        min_idx = min_idx if patch_error_surf[self.patch_height - 1, min_idx] < patch_error_surf[self.patch_height - 1, j] else j
    mcb[self.patch_height-1] = min_idx
    # ... then walk the backpointers up to recover the seam
    for i in np.arange(self.patch_height - 1, 0, -1):
        mcb[i - 1] = holder[i, mcb[i]]
    return mcb
def min_cost_path_horizntl(self, patch_error_surf):
    """Dynamic-programming minimum-cost horizontal seam through the top overlap.

    Mirror of :meth:`min_cost_path_vertical`, marching column-by-column.

    Args:
        patch_error_surf: (patch_overlap, patch_width) error surface.
            NOTE: accumulated IN PLACE — the caller's array is mutated.

    Returns:
        ndarray of int, length ``patch_width``: the seam row per column.

    Fixes: the center-row branch compared ``curr_min`` against row ``i-1``
    a second time; the mirror of the vertical version requires comparing
    against row ``i+1``, so the cheapest predecessor among i-1/i/i+1 is now
    chosen correctly.  Also ``np.int`` -> ``int`` (removed in NumPy 1.24)
    and a forced seam for a one-row overlap (previously crashed on ``i+1``).
    """
    mcb = np.zeros((self.patch_width), int)  # mincostboundary
    if self.patch_overlap == 1:
        # degenerate one-row overlap: the seam is forced to row 0
        return mcb
    holder = np.zeros((self.patch_overlap, self.patch_width), int)  # backpointers
    for j in np.arange(1, self.patch_width):
        for i in np.arange(self.patch_overlap):
            if i == 0:
                # first row: predecessor is i or i+1
                holder[i,j] = i if patch_error_surf[i,j-1] < patch_error_surf[i+1,j-1] else i + 1
            elif i == self.patch_overlap - 1:
                # last row: predecessor is i or i-1
                holder[i,j] = i if patch_error_surf[i,j-1] < patch_error_surf[i-1,j-1] else i - 1
            else:
                # center rows: min over i-1, i, i+1 (bug fix: was i-1 twice)
                curr_min = i if patch_error_surf[i,j-1] < patch_error_surf[i-1,j-1] else i - 1
                holder[i,j] = curr_min if patch_error_surf[curr_min,j-1] < patch_error_surf[i+1,j-1] else i + 1
            patch_error_surf[i,j] += patch_error_surf[holder[i,j], j-1]
    # cheapest end point in the last column ...
    min_idx = 0
    for i in np.arange(1,self.patch_overlap):
        min_idx = min_idx if patch_error_surf[min_idx, self.patch_width - 1] < patch_error_surf[i, self.patch_width - 1] else i
    mcb[self.patch_width-1] = min_idx
    # ... then walk the backpointers left to recover the seam
    for j in np.arange(self.patch_width - 1,0,-1):
        mcb[j - 1] = holder[mcb[j],j]
    return mcb
# Quilting Functions:
# -------------------
def quilt_patch(self, coords, patch, mcb=None):
    """Write ``patch`` onto canvas and target_canvas at ``coords``.

    With ``mcb=None`` the whole patch is copied verbatim (first patch, or
    seams disabled).  Otherwise the overlap strip(s) are blended along the
    supplied min-cost boundary seam(s) and the non-overlapping remainder is
    copied, then the covered region is flagged in ``quilted_canvas``.

    NOTE(review): coords[0] is named ``y`` but is used as the COLUMN index
    and coords[1] (``x``) as the ROW index; also width/height are swapped
    between the slice on the first branch and the quilted_canvas slice at
    the end.  This only works because patches are square
    (patch_height == patch_width == config.h_dim) — TODO confirm intent.
    """
    y = coords[0]
    x = coords[1]
    if mcb is None:
        # first patch, or set to ignore all mcbs
        self.canvas[x:x+self.patch_height, y:y+self.patch_width] = np.squeeze(patch)
        self.target_canvas[x:x+self.patch_height, y:y+self.patch_width] = np.squeeze(patch)
        # self.quilted_canvas[x:x+self.patch_height, y:y+self.patch_width] = True
    else:
        if self.patch_xcoord_i == 0:
            # a left-side patch, over calculate horizontal
            self.quilt_overlap_horizntl(coords, patch, mcb)
            self.quilt_patch_remainder(coords, patch, switch='h')
        elif self.patch_ycoord_i == 0:
            # a top-side patch, only calculate vertical
            self.quilt_overlap_vertical(coords, patch, mcb)
            self.quilt_patch_remainder(coords, patch, switch='v')
        else:
            # a center patch, calculate both; mcb rows: 0=horizontal, 1=vertical
            self.quilt_overlap_horizntl(coords, patch, mcb[0, :])
            self.quilt_overlap_vertical(coords, patch, mcb[1, :])
            self.quilt_patch_remainder(coords, patch, switch='b')
    # mark the whole patch footprint as painted (per-branch versions above
    # and in the quilt helpers are commented out in favor of this one line)
    self.quilted_canvas[x:x+self.patch_width, y:y+self.patch_height] = True
def quilt_overlap_vertical(self, coords, patch, mcb):
    """Blend the vertical (left) overlap strip along the seam ``mcb``.

    For each row, pixels at and to the right of the seam column come from
    the new ``patch``; pixels left of the seam keep the existing canvas.
    Writes to both ``canvas`` and ``target_canvas``.
    """
    y, x = coords
    for row in np.arange(self.patch_height):
        # columns from the seam out to the overlap edge take patch values
        for col in np.arange(mcb[row], self.patch_overlap):
            self.canvas[x + row, y + col] = patch[row, col]
            self.target_canvas[x + row, y + col] = patch[row, col]
def quilt_overlap_horizntl(self, coords, patch, mcb):
    """Blend the horizontal (top) overlap strip along the seam ``mcb``.

    For each column, pixels at and below the seam row come from the new
    ``patch``; pixels above the seam keep the existing canvas.  Writes to
    both ``canvas`` and ``target_canvas``.
    """
    y, x = coords
    for col in np.arange(self.patch_width):
        # rows from the seam down to the overlap edge take patch values
        for row in np.arange(mcb[col], self.patch_overlap):
            self.canvas[x + row, y + col] = patch[row, col]
            self.target_canvas[x + row, y + col] = patch[row, col]
def quilt_patch_remainder(self, coords, patch, switch):
    """Copy the non-overlapping remainder of ``patch`` onto both canvases.

    ``switch`` selects which overlap strip(s) to skip: ``'h'`` skips the
    top ``patch_overlap`` rows, ``'v'`` skips the left columns, ``'b'``
    skips both.  Any other value is silently ignored (as in the original).
    Row/column extents use patch_width and patch_height interchangeably;
    patches are square so the result is the same.
    """
    y, x = coords
    if switch not in ('h', 'v', 'b'):
        return  # unknown switch: nothing to copy
    ov = self.patch_overlap
    row0 = ov if switch in ('h', 'b') else 0
    col0 = ov if switch in ('v', 'b') else 0
    remainder = np.squeeze(patch[row0:, col0:])
    self.canvas[x + row0:x + self.patch_width, y + col0:y + self.patch_height] = remainder
    self.target_canvas[x + row0:x + self.patch_width, y + col0:y + self.patch_height] = remainder
def canvas_plot(self, filename, cmap='gray', verticies=False):
    """Render the canvas and save it under the painter's sample directory.

    Overlays the ground-truth image when one is attached, and optionally
    marks the patch grid vertices.  Saves at 300 dpi with tight bounding
    box and closes the figure to free memory.
    """
    fig, ax = plt.subplots()
    ax.imshow(self.canvas, cmap=cmap)
    if self.groundtruth:
        ax.imshow(self.groundtruth_canvas_overlay)
    ax.axis('off')
    if verticies:
        ax.plot(self.patch_xcoords, self.patch_ycoords, marker='.', ls='none', ms=2)
    fig.savefig(os.path.join(self.paint_samp_dir, filename), bbox_inches='tight', dpi=300)
    plt.close(fig)
class ContextPainter(CanvasPainter):
    """Painter that picks each patch by optimizing the generator latent z.

    The already-painted canvas region (plus any ground truth) forms a masked
    target; z is refined by hand-rolled momentum gradient descent on
    ``context_loss + lam * perceptual_loss`` so the generated patch matches
    its surroundings while remaining realistic to the discriminator
    (semantic-inpainting style objective).
    """
    def __init__(self, stratgan, paint_label,
                 canvas_width, canvas_height,
                 patch_overlap, patch_overlap_threshold,
                 batch_dim=40):
        # NOTE(review): patch_overlap_threshold is accepted but never stored
        # nor forwarded to the base class — appears unused here; confirm.
        CanvasPainter.__init__(self, stratgan=stratgan,
                               paint_label=paint_label,
                               canvas_width=canvas_width,
                               canvas_height=canvas_height,
                               patch_overlap=patch_overlap,
                               batch_dim=batch_dim)
        print(" [*] Building painter...")
        # pull the tensors needed for the inpainting objective from the
        # already-built StratGAN graph, by name
        graph = tf.get_default_graph()
        self.gi = graph.get_tensor_by_name('gener/g_in:0')       # generator input z
        self.go = graph.get_tensor_by_name('gener/g_prob:0')     # generator output
        self.do = graph.get_tensor_by_name('discr_1/Sigmoid:0')  # discriminator prob
        self.gl = tf.log(1 - self.do)  # log(1 - D(G(z))) realism penalty
        self.build_input_placeholders()
        self.build_context_loss()
        self.lam = 2. # weighting for realism
        self.gam = 0.2 # adjustment for non-ground truth context
        self.perceptual_loss = self.gl
        self.inpaint_loss = self.context_loss + self.lam*self.perceptual_loss
        # gradient of the combined loss w.r.t. z, used for manual updates
        self.inpaint_grad = tf.gradients(self.inpaint_loss, self.gi)
        self.img_cntr = 0  # counter for numbered debug figures
    def build_context_loss(self):
        """Builds the context loss objective: masked L1 between the
        generator output and the target, summed per batch element."""
        self.go = tf.reshape(self.go, [self.batch_dim, -1])
        self.context_loss = tf.reduce_sum(
            tf.contrib.layers.flatten(
                tf.abs(tf.multiply(self.masks, self.go) -
                       tf.multiply(self.masks, self.targets))), 1)
    def build_input_placeholders(self):
        """Create the flat mask/target placeholders fed per patch."""
        # with self.graph.as_default():
        self.masks = tf.placeholder(tf.float32,
                                    (self.batch_dim, self.patch_height*self.patch_width),
                                    name='masks')
        self.targets = tf.placeholder(tf.float32,
                                      (self.batch_dim, self.patch_height*self.patch_width),
                                      name='targets')
    def generate_next_patch(self):
        """Optimize a batch of z vectors against the current context and
        return the best-matching generated patch (2-D, h_dim x h_dim)."""
        self.extract_context_mask()
        v = 0.1        # momentum velocity
        momentum = 1
        lr = 0.001
        self.z_in = np.random.normal(-1, 1, [self.batch_dim, self.config.z_dim]).astype(np.float32)
        # self.writer = tf.summary.FileWriter(self.stratgan.train_log_dir,
        #                                     graph=self.sess.graph)
        # self.writer.flush()
        for i in np.arange(50):
            # out_vars = [self.stratgan.G, self.inpaint_loss, self.inpaint_grad]
            in_dict={self.stratgan.z: self.z_in,
                     self.stratgan.y: self.paint_label,
                     self.stratgan.is_training: False,
                     self.masks: self.masks0,
                     self.targets: self.targets0}
            out_vars = [self.inpaint_loss, self.inpaint_grad, self.go]
            loss, grad, patch = self.sess.run(out_vars, feed_dict=in_dict)
            # debug figure dump, disabled via the constant False guard
            if False and np.mod(i, 10)==0:
                patch_reshaped = np.reshape(patch, (self.batch_dim, \
                    self.patch_width, self.patch_height))
                ptch = []
                for p in np.arange(3):
                    ptch.append( patches.Rectangle((self.patch_xcoord_i, self.patch_ycoord_i),
                        width=self.patch_width, height=self.patch_height,
                        edgecolor='r', facecolor='None') )
                fig = plt.figure()
                # fig.subplots_adjust(hspace=0.025, wspace=0.025)
                gs = fig.add_gridspec(4, 4)
                ax1 = fig.add_subplot(gs[0,1:3])
                cnv = ax1.imshow(self.canvas, cmap='gray')
                if self.groundtruth:
                    ax1.imshow(self.groundtruth_canvas_overlay)
                ax1.add_patch(ptch[0])
                cnv.set_clim(0.0, 1.0)
                ax1.axes.xaxis.set_ticklabels([])
                ax1.axes.yaxis.set_ticklabels([])
                ax2 = fig.add_subplot(gs[1,1:3])
                tcnv = ax2.imshow(self.target_canvas, cmap='gray')
                ax2.add_patch(ptch[1])
                tcnv.set_clim(0.0, 1.0)
                ax2.axes.xaxis.set_ticklabels([])
                ax2.axes.yaxis.set_ticklabels([])
                ax3 = fig.add_subplot(gs[2,1:3])
                qcnv = ax3.imshow(self.quilted_canvas, cmap='gray')
                ax3.add_patch(ptch[2])
                qcnv.set_clim(0.0, 1.0)
                ax3.axes.xaxis.set_ticklabels([])
                ax3.axes.yaxis.set_ticklabels([])
                ax5 = fig.add_subplot(gs[1,0])
                tgt = ax5.imshow(self.target_as_image, cmap='gray')
                tgt.set_clim(0.0, 1.0)
                ax5.axes.xaxis.set_ticklabels([])
                ax5.axes.yaxis.set_ticklabels([])
                ax4 = fig.add_subplot(gs[2,0])
                msk = ax4.imshow(self.mask_as_image, cmap='gray')
                msk.set_clim(0.0, 1.0)
                ax4.axes.xaxis.set_ticklabels([])
                ax4.axes.yaxis.set_ticklabels([])
                ax6 = fig.add_subplot(gs[:2,3])
                zs = ax6.imshow(self.z_in.T)
                zs.set_clim(-1.0, 1.0)
                ax6.axes.xaxis.set_ticklabels([])
                ax6.axes.yaxis.set_ticklabels([])
                ax6.set_xlabel('batch')
                r = 3
                adj = 0
                for o, p in enumerate( np.random.randint(low=0, high=self.batch_dim, size=(4)) ):
                    # if o>=r:
                    #     r = 4
                    #     adj = 4
                    # NOTE(review): rebinding ``ptch`` (the Rectangle list)
                    # to an AxesImage here shadows the list above
                    axp = fig.add_subplot(gs[r,o])
                    ptch = axp.imshow(patch_reshaped[p,:,:], cmap='gray')
                    ptch.set_clim(0.0, 1.0)
                    axp.axes.xaxis.set_ticklabels([])
                    axp.axes.yaxis.set_ticklabels([])
                # plt.savefig(os.path.join(self.paint_samp_dir, 'context_i.png'),
                plt.savefig(os.path.join(self.paint_samp_dir, 'iters/context_{0}.png'.format(str(self.img_cntr).zfill(4))),
                            bbox_inches='tight', dpi=150, transparent=False)
                plt.close()
                self.img_cntr += 1
            # Nesterov-style momentum step on z, clipped to the generator's
            # valid input range [-1, 1]
            v_prev = np.copy(v)
            v = momentum*v - lr*grad[0]
            self.z_in += (-momentum * v_prev +
                          (1 + momentum) * v)
            self.z_in = np.clip(self.z_in, -1, 1)
            verbose = False
            if verbose:
                print('Iteration {}: {}'.format(i, np.mean(loss)))
        # routine for determining the patch from the batch
        # NOTE(review): self.context_loss is a TF tensor, not the evaluated
        # ``loss`` array — np.argmin over it looks suspect; confirm whether
        # this should be np.argmin(loss).
        min_loc = np.argmin(self.context_loss)
        next_patch = np.copy(patch[min_loc,:])
        next_patch = next_patch.reshape(self.config.h_dim, self.config.h_dim)
        return next_patch
    def extract_context_mask(self):
        """
        Extract the target image and the corresponding mask for the current
        patch position, storing the flattened batch feeds in ``targets0``
        and ``masks0`` (plus 2-D copies for plotting).
        """
        target_extract = self.target_canvas[self.patch_ycoord_i:self.patch_ycoord_i+self.patch_height,
                                            self.patch_xcoord_i:self.patch_xcoord_i+self.patch_width]
        quilted_extract = self.quilted_canvas[self.patch_ycoord_i:self.patch_ycoord_i+self.patch_height,
                                              self.patch_xcoord_i:self.patch_xcoord_i+self.patch_width]
        # expand and modify the target and mask: shrink the non-groundtruth
        # context toward 0.5 by factor gam
        mask_tilde = np.copy(quilted_extract)
        target_tilde = np.copy(target_extract)
        target_tilde -= 0.5
        target_tilde *= self.gam
        target_tilde += 0.5
        # import groundtruth information: finite groundtruth pixels override
        # the target at full weight
        if self.groundtruth:
            groundtruth_extract = self.groundtruth_canvas[self.patch_ycoord_i:self.patch_ycoord_i+self.patch_height,
                                                          self.patch_xcoord_i:self.patch_xcoord_i+self.patch_width]
            has_truth = np.isfinite(groundtruth_extract)
            target_tilde[has_truth] = groundtruth_extract[has_truth]
            mask_tilde[has_truth] = 1
        # reshape and store in feeds (same target/mask tiled over the batch)
        target_flat = target_tilde.reshape(1, -1)
        self.targets0 = np.tile(target_flat, (self.batch_dim, 1))
        mask_flat = mask_tilde.flatten()
        self.masks0 = np.zeros((self.batch_dim, self.patch_width*self.patch_height), dtype=np.float32)
        # NOTE(review): relies on mask_flat being boolean so this is boolean
        # indexing; an integer/float dtype would be fancy indexing — confirm
        # quilted_canvas stays boolean.
        self.masks0[:, mask_flat] = 1.
        self.patches0 = np.zeros((1, self.patch_width, self.patch_height), dtype=np.float32)
        # convert the masks to images for plotting
        self.mask_as_image = np.reshape(self.masks0[0,:],
                                        (self.patch_width, self.patch_height))
        self.target_as_image = np.reshape(self.targets0[0,:],
                                          (self.patch_width, self.patch_height))
        self.patch0_as_image = np.reshape(self.patches0,
                                          (self.patch_width, self.patch_height))
class EfrosPainter(CanvasPainter):
    """Painter using rejection sampling of random GAN patches.

    Patches are drawn at random from the generator and accepted when their
    overlap error (and, when cores are attached, their core error) falls
    below a threshold that is slowly relaxed — an image-quilting-style
    acceptance loop.
    """
    def __init__(self, stratgan, paint_label,
                 canvas_width, canvas_height,
                 patch_overlap, patch_overlap_threshold,
                 ground_truth_weight=8):
        CanvasPainter.__init__(self, stratgan=stratgan,
                               paint_label=paint_label,
                               canvas_width=canvas_width,
                               canvas_height=canvas_height,
                               patch_overlap=patch_overlap)
        self.patch_overlap_threshold = patch_overlap_threshold
        self.ground_truth_weight = ground_truth_weight
        # one candidate patch per sess.run call
        self.paint_batch_size = 1
    def generate_next_patch(self):
        """Sample random patches until one meets the (relaxing) thresholds."""
        patch_overlap_threshold_this_patch = self.patch_overlap_threshold
        self.match = False
        self.patch_loop = 0  # rejected-candidate counter
        # loop until a matching patch is found, increasing thresh each time
        while not self.match:
            # get a new patch
            next_patch = self.generate_random_patch()
            # NOTE(review): groundtruth_cores is presumably set by the base
            # class / groundtruth setup — not visible here; confirm.
            if self.groundtruth_cores:
                # check for error against cores
                core_error = self.get_core_error(next_patch)
                # check patch error against core thresh
                if core_error <= self.core_threshold_error:
                    pass # continue on to check overlap error
                else:
                    self.patch_loop += 1
                    # progress line every 100 rejections
                    if np.mod(self.patch_loop, 100) == 0:
                        sys.stdout.write(" [%-20s] %-3d%% | [%02d]/[%d] patches | core threshold: %2d\n" %
                            ('='*int((self.patch_i*20/self.patch_count)), int(self.patch_i/self.patch_count*100),
                            self.patch_i, self.patch_count, self.core_threshold_error))
                    continue # end loop iteration and try new patch
            # calculate error on the patch overlap
            patch_error, patch_error_surf = self.calculate_patch_error_surf(next_patch)
            # sum/2 if it's a two-sided patch
            if len(patch_error.shape) > 0:
                patch_error = patch_error.sum() / 2
            if patch_error <= patch_overlap_threshold_this_patch:
                self.match = True
            else:
                patch_overlap_threshold_this_patch *= 1.01 # increase by 1% error threshold
                self.patch_loop += 1
        return next_patch
    def generate_random_patch(self):
        """Draw one unconditioned-z patch from the generator (2-D array)."""
        # use the GAN to make a random guess patch
        z = np.random.uniform(-1, 1, [self.paint_batch_size, self.config.z_dim]).astype(np.float32)
        paint_label = self.paint_label
        patch = self.sess.run(self.stratgan.G, feed_dict={self.stratgan.z: z,
                                                          self.stratgan.y: paint_label,
                                                          self.stratgan.is_training: False})
        r_patch = patch[0].reshape(self.config.h_dim, self.config.h_dim)
        return r_patch
    def get_core_error(self, next_patch):
        """L2 error of the candidate against the first core column strip
        intersecting this patch; also updates ``core_threshold_error``
        (relaxed as patch_loop grows).  Returns 0.0 when no core overlaps."""
        core_loc_match = np.logical_and(self.core_loc >= self.patch_xcoord_i,
                                        self.core_loc < self.patch_xcoord_i+self.patch_width-self.core_width)
        # check for anything in the core list
        if np.any( core_loc_match ):
            core_idx = np.argmax(core_loc_match)  # first matching core
            canvas_overlap = self.canvas[self.patch_ycoord_i:self.patch_ycoord_i+self.patch_height,
                                         self.core_loc[core_idx]:self.core_loc[core_idx]+self.core_width]
            patch_overlap = next_patch[:, self.core_loc[core_idx]-self.patch_xcoord_i:self.core_loc[core_idx]-self.patch_xcoord_i+self.core_width]
            ec = np.linalg.norm( (patch_overlap) - (canvas_overlap))
            # threshold scales with how much of the core strip is unpainted,
            # loosened by 0.01% per rejected candidate
            self.core_threshold_error = np.sqrt( (canvas_overlap.size - np.sum(canvas_overlap)) * 0.6 ) * (1+self.patch_loop/10000)
            if self.core_threshold_error == 0.0:
                self.core_threshold_error = 6.0  # floor for fully-painted strips
        else:
            ec = 0.0
        return ec
amoodie/StratGAN | StratGAN/context_painter.py | import matplotlib.pyplot as plt
import matplotlib.patches as patches
import sys
import numpy as np
from random import randint
import tensorflow as tf
import os
class ContextPainter(object):
    """Stand-alone prototype of context (inpainting) painting.

    Unlike the CanvasPainter-based version, this class carries its own
    canvas/label bookkeeping and optimizes a batch of z vectors against a
    fixed initial mask/image pair (``mask0``/``image0``) rather than a
    moving patch window.
    """
    def __init__(self, stratgan,
                 paint_label=None, paint_width=1000, paint_height=None,
                 paint_overlap=24, paint_overlap_thresh=10.0,
                 paint_core_source='block',
                 paint_ncores=0, paint_core_thresh=0.01,
                 batch_dim=40):
        print(" [*] Building painter...")
        self.sess = stratgan.sess
        self.stratgan = stratgan
        self.config = stratgan.config
        self.paint_samp_dir = self.stratgan.paint_samp_dir
        self.out_data_dir = self.stratgan.out_data_dir
        self.batch_dim = batch_dim
        # true when paint_label is None/falsy but NOT the valid label 0
        if not paint_label == 0 and not paint_label:
            print('Label not given for painting, assuming zero for label')
            self.paint_label = np.zeros((batch_dim, stratgan.data.n_categories))
            self.paint_label[:, 0] = 1
            self.paint_int = 0
        else:
            # paint_label = tf.one_hot(paint_label, self.config.n_categories)
            self.paint_label = np.zeros((batch_dim, stratgan.data.n_categories))
            self.paint_label[:, paint_label] = 1
            self.paint_int = paint_label
        self.paint_width = paint_width
        if not paint_height:
            # default to a 4:1 canvas aspect ratio
            self.paint_height = int(paint_width / 4)
        else:
            self.paint_height = paint_height
        self.overlap = paint_overlap
        self.overlap_threshold = paint_overlap_thresh
        # square patches sized to the generator output
        self.patch_height = self.patch_width = self.config.h_dim
        self.patch_size = self.patch_height * self.patch_width
        # self.canvas = np.ones((self.paint_height, self.paint_width))
        # generate the list of patch coordinates
        # self.patch_xcoords, self.patch_ycoords = self.calculate_patch_coords()
        # self.patch_count = self.patch_xcoords.size
        # pull needed tensors from the already-built StratGAN graph by name
        graph = tf.get_default_graph()
        self.gi = graph.get_tensor_by_name('gener/g_in:0')
        self.go = graph.get_tensor_by_name('gener/g_prob:0')
        self.do = graph.get_tensor_by_name('discr_1/Sigmoid:0')
        # self.gl = graph.get_tensor_by_name('loss_g_op:0')
        # self.gl = tf.log(1 - self.stratgan.D_fake)
        self.gl = tf.log(1 - self.do)
        # self.di = self.graph.get_tensor_by_name(model_name+'/'+disc_input)
        # debug: dump every node name in the graph (side-effect list
        # comprehension; NOTE(review): consider a plain loop / removal)
        [print(n.name) for n in tf.get_default_graph().as_graph_def().node]
        # load the last core arrays
        # NOTE(review): np.int was deprecated in NumPy 1.20, removed 1.24
        self.core_width = np.int(10)
        self.core_val = np.load(os.path.join(self.out_data_dir, 'last_core_val.npy'))
        self.core_loc = np.load(os.path.join(self.out_data_dir, 'last_core_loc.npy'))
        # self.mask0 = np.zeros((self.patch_width, self.patch_height))
        # self.image0 = np.zeros((self.patch_width, self.patch_height))
        # for i in np.arange(len(self.core_val)):
        #     self.mask0[:, self.core_loc[i]:self.core_loc[i]+self.core_width] = 1
        #     self.image0[:, self.core_loc[i]:self.core_loc[i]+self.core_width] = self.core_val[:,:,i]
        # self.x = tf.placeholder(tf.float32,
        #                         [None, self.data.h_dim, self.data.w_dim, self.data.c_dim],
        #                         name='x')
        # self.y = tf.placeholder(tf.float32,
        #                         [None, self.y_dim],
        #                         name='y') # labels
        # self.G_context = self.generator(_z=self.z_star,
        #                                 _labels=self.y,
        #                                 is_training=False,
        #                                 batch_norm=self.stratgan.config.batch_norm,
        #                                 scope_name='gener_context')
        # hard-coded choice of initial context: rectangular blocks or a
        # random scatter sampled from a generated patch
        patch0_flag = 'blocks'
        if patch0_flag == 'blocks':
            # hand-placed rectangular constraints (mask marks known pixels,
            # image holds their values; 0.5 = "unknown" background)
            self.mask0 = np.zeros((self.batch_dim, self.patch_width, self.patch_height), dtype=np.float32)
            self.mask0[:,0:128,0:10] = 1
            self.mask0[:,40:58,30:41] = 1
            self.mask0[:,96:104,12:22] = 1
            self.mask0[:,13:26,95:115] = 1
            self.mask0 = self.mask0.reshape(self.batch_dim, -1)
            self.image0 = 0.5 * np.ones((self.batch_dim, self.patch_width, self.patch_height), dtype=np.float32)
            self.image0[:,0:128,0:10] = 1
            self.image0[:,40:55,0:10] = 0
            self.image0[:,40:58,30:35] = 0
            self.image0[:,96:104,12:22] = 1
            self.image0[:,13:26,95:115] = 0
            self.image0 = self.image0.reshape(self.batch_dim, -1)
            self.patch0 = np.zeros((1, self.patch_width, self.patch_height), dtype=np.float32)
        elif patch0_flag == 'rand':
            # constraints sampled from 1000 random pixels of a generated patch
            self.z_0 = np.random.uniform(-1, 1, [1, self.config.z_dim]).astype(np.float32)
            patch = self.sess.run(self.stratgan.G,
                                  feed_dict={self.stratgan.z: self.z_0,
                                             self.stratgan.y: self.paint_label[0,:].reshape(-1,self.stratgan.config.n_categories),
                                             self.stratgan.is_training: False})
            self.patch0 = patch.squeeze()
            randx = np.random.randint(low=0, high=self.patch_width, size=1000)
            randy = np.random.randint(low=0, high=self.patch_height, size=1000)
            self.mask0 = np.zeros((self.batch_dim, self.patch_width, self.patch_height), dtype=np.float32)
            self.mask0[:, randx, randy] = 1
            self.mask0 = self.mask0.reshape(self.batch_dim, -1)
            self.image0 = 0.5 * np.ones((self.batch_dim, self.patch_width, self.patch_height), dtype=np.float32)
            self.image0[:, randx, randy] = self.patch0[randx, randy]
            self.image0 = self.image0.reshape(self.batch_dim, -1)
        self.build_input_placeholders()
        self.build_context_loss()
        self.lam = 10.  # weighting of the realism (perceptual) term
        self.perceptual_loss = self.gl
        self.inpaint_loss = self.context_loss + self.lam*self.perceptual_loss
        self.inpaint_grad = tf.gradients(self.inpaint_loss, self.gi)
    def build_context_loss(self):
        """Builds the context and prior loss objective: masked L1 between
        the generator output and the constraint image, summed per batch."""
        # with self.graph.as_default():
        self.go = tf.reshape(self.go, [self.batch_dim, -1])
        self.context_loss = tf.reduce_sum(
            tf.contrib.layers.flatten(
                tf.abs(tf.multiply(self.masks, self.go) -
                       tf.multiply(self.masks, self.images))), 1)
    def build_input_placeholders(self):
        """Create the flat mask/image placeholders fed during optimization."""
        # with self.graph.as_default():
        self.masks = tf.placeholder(tf.float32,
                                    (self.batch_dim, self.patch_height*self.patch_width),
                                    name='masks')
        self.images = tf.placeholder(tf.float32,
                                     (self.batch_dim, self.patch_height*self.patch_width),
                                     name='images')
        # self.z_in = tf.placeholder(tf.float32,
        #                            self.stratgan.z_dim,
        #                            name='z_optim')
    def context_paint_image(self):
        """Run 200 momentum steps on the batch of z vectors against the
        fixed mask0/image0 constraints; stores the final generated batch in
        ``self.patchF``."""
        # 2-D copies for plotting
        self.mask_as_image = np.reshape(self.mask0[0,:],
                                        (self.patch_width, self.patch_height))
        self.image_as_image = np.reshape(self.image0[0,:],
                                         (self.patch_width, self.patch_height))
        self.patch0_as_image = np.reshape(self.patch0,
                                          (self.patch_width, self.patch_height))
        v = 0  # momentum velocity
        # self.z_inold = np.random.uniform(-1, 1, [1, self.config.z_dim]).astype(np.float32)
        # print(self.z_inold.shape)
        momentum = 0.8
        lr = 0.001
        self.z_in = np.random.normal(-1, 1, [self.batch_dim, self.config.z_dim]).astype(np.float32)
        # self.z_in = tf.get_variable("z_in", [1, 100], tf.float32,
        #                             initializer=tf.random_uniform_initializer())
        self.writer = tf.summary.FileWriter(self.stratgan.train_log_dir,
                                            graph=self.sess.graph)
        self.writer.flush()
        # self.z_optim = tf.train.AdamOptimizer(lr, beta1=0.6) \
        #     .minimize(self.inpaint_loss, var_list=self.z_in)
        # self.saver.save(self.sess,
        #                 os.path.join(self.train_chkp_dir, 'StratGAN'),
        #                 global_step=3000)
        # print("images: ", self.images)
        for i in np.arange(200):
            # out_vars = [self.stratgan.G, self.inpaint_loss, self.inpaint_grad]
            in_dict={self.stratgan.z: self.z_in,
                     self.stratgan.y: self.paint_label,
                     self.stratgan.is_training: False,
                     self.masks: self.mask0,
                     self.images: self.image0}
            # patch, loss, grad = self.sess.run(self.G, z_Adam, feed_dict=in_dict)
            # patch, _ = self.sess.run([self.stratgan.G, self.z_optim], feed_dict=in_dict)
            out_vars = [self.inpaint_loss, self.inpaint_grad, self.go]
            loss, grad, patch = self.sess.run(out_vars, feed_dict=in_dict)
            # print("grad:", grad[0].shape)
            # print("loss:", loss)
            # print("v:", v)
            # self.sess.run(tf.clip_by_value(self.z_in, -1, 1))
            # print("patch_shape:", patch.shape)
            # print("loss_shape:", loss.shape)
            # print("grad_shape:", grad.shape)
            # per-iteration debug figure, disabled by the constant guard
            if False:
                patch_reshaped = np.reshape(patch, (self.batch_dim, \
                    self.patch_width, self.patch_height))
                fig = plt.figure()
                ax1 = fig.add_subplot(2,2,1)
                msk = ax1.imshow(self.mask_as_image, cmap='gray')
                msk.set_clim(0.0, 1.0)
                ax2 = fig.add_subplot(2,2,2)
                img = ax2.imshow(self.image_as_image, cmap='gray')
                img.set_clim(0.0, 1.0)
                ax3 = fig.add_subplot(2,2,3)
                ptch0 = ax3.imshow(self.patch0_as_image, cmap='gray')
                ptch0.set_clim(0.0, 1.0)
                ax4 = fig.add_subplot(2,2,4)
                best_patch_idx = np.argmin(np.sum(loss,1),0)
                ptch = ax4.imshow(patch_reshaped[best_patch_idx,:,:], cmap='gray')
                ptch.set_clim(0.0, 1.0)
                # plt.savefig(os.path.join(self.paint_samp_dir, 'context_{}.png'.format(str(i).zfill(3))),
                plt.savefig(os.path.join(self.paint_samp_dir, 'context_i.png'),
                            bbox_inches='tight', dpi=150)
                plt.close()
            # Nesterov-style momentum step on z, clipped to [-1, 1]
            v_prev = np.copy(v)
            v = momentum*v - lr*grad[0]
            self.z_in += (-momentum * v_prev +
                          (1 + momentum) * v)
            self.z_in = np.clip(self.z_in, -1, 1)
            # print("z shape:", z.shape)
            verbose = True
            if verbose:
                print('Iteration {}: {}'.format(i, np.mean(loss)))
                # print('z_in: {}'.format(self.z_in[0, 0:5]))
        # print("perceptual loss:", self.perceptual_loss)
        self.patchF = np.copy(patch)
        # return patchF
        # z = np.random.uniform(-1, 1, [1, self.config.z_dim]).astype(np.float32)
        # paint_label = self.paint_label
        # patch = self.sess.run(self.stratgan.G, feed_dict={self.stratgan.z: z,
        #                                                   self.stratgan.y: paint_label,
        #                                                   self.stratgan.is_training: False})
        # r_patch = patch[0].reshape(self.config.h_dim, self.config.h_dim)
        # # return r_patch
        # print("perceptual loss:", self.perceptual_loss)
amoodie/StratGAN | StratGAN/model.py | <reponame>amoodie/StratGAN
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import time
import os, sys
import loader
import ops
import utils
import paint
import groundtruth as gt
# import context_painter
from datagen import StratHeteroProvider
# from pympler.tracker import SummaryTracker, summary, muppy
# tracker = SummaryTracker()
# import types
# from pympler import asizeof
# import gc
class StratGAN(object):
def __init__(self, sess, config):
    """Initialize the StratGAN model.

    Builds the dataset provider, copies image dimensions into the config,
    assembles the TF graph via build_model(), and writes the run config to
    the training log directory (once).

    Args:
        sess: an open tf.Session to run the model in.
        config: run configuration object (dirs, hyperparameters, flags).
    """
    print('\n [*] Initializing model...')
    self.sess = sess
    self.config = config
    # Load the dataset
    print(' [*] Building dataset provider...')
    self.data = loader.ImageDatasetProvider(image_dir=self.config.image_dir,
                                            image_ext=config.image_ext,
                                            c_dim=1,
                                            batch_size=self.config.batch_size,
                                            shuffle_data=True,
                                            buffer_size=config.buffer_size,
                                            drop_remainder=config.drop_remainder,
                                            repeat_data=config.repeat_data,
                                            a_min=None, a_max=None,
                                            verbose=config.img_verbose)
    # self.data = StratHeteroProvider(batch_size=32)
    # grab some info from the data into the config
    self.config.h_dim = self.data.h_dim
    self.config.w_dim = self.data.w_dim
    self.config.n_categories = self.data.n_categories
    # Initialize the net model
    print(' [*] Building model...')
    self.build_model()
    # write config file out (only on the first run in this log dir)
    if not os.path.isfile(os.path.join(self.train_log_dir, 'config.json')):
        utils.write_config(self)
    # some other parameter / preference setups
    transparent_plots = True
    if transparent_plots:
        plt.rcParams['savefig.transparent'] = True
def build_model(self):
    """Assemble the conditional-GAN graph.

    Creates input placeholders, the generator and (shared-weight) real/fake
    discriminators, sigmoid-cross-entropy losses, TensorBoard summaries,
    the trainable-variable split for the two optimizers, and the
    logging/checkpoint directories plus a Saver.
    """
    # grab some parameters for convenience:
    # -------------------
    self.y_dim = self.data.n_categories
    self.z_dim = self.config.z_dim
    self.c_dim = self.data.c_dim
    # instantiate placeholders:
    # -------------------
    self.x = tf.placeholder(tf.float32,
                            [None, self.data.h_dim, self.data.w_dim, self.data.c_dim],
                            name='x')
    self.y = tf.placeholder(tf.float32,
                            [None, self.y_dim],
                            name='y') # labels
    self.z = tf.placeholder(tf.float32,
                            shape=[None, self.config.z_dim],
                            name='z') # generator inputs
    self.encoded = tf.placeholder(tf.int8,
                                  shape=[None, self.data.n_categories],
                                  name='encoded') # generator label inputs
    self.is_training = tf.placeholder(tf.bool, name='is_training')
    # instantiate networks:
    # -------------------
    self.G = self.generator(_z=self.z,
                            _labels=self.y,
                            is_training=self.is_training,
                            batch_norm=self.config.batch_norm)
    self.D_real, self.D_real_logits = self.discriminator(self.x,
                                                         self.y,
                                                         reuse=False,
                                                         is_training=self.is_training,
                                                         batch_norm=self.config.batch_norm,
                                                         minibatch=self.config.minibatch_discrim) # real response
    self.D_fake, self.D_fake_logits = self.discriminator(self.G,
                                                         self.y,
                                                         reuse=True,
                                                         is_training=self.is_training,
                                                         batch_norm=self.config.batch_norm,
                                                         minibatch=self.config.minibatch_discrim) # fake response
    # decoder to convert one-hot labels to category numbers
    self.decoder = tf.argmax(self.encoded, axis=1)
    # define the losses (ops.scewl = sigmoid cross-entropy with logits)
    # -------------------
    self.loss_d_real = tf.reduce_mean(ops.scewl(logits=self.D_real_logits,
                                                labels=tf.ones_like(self.D_real)))
    self.loss_d_fake = tf.reduce_mean(ops.scewl(logits=self.D_fake_logits,
                                                labels=tf.zeros_like(self.D_fake)))
    self.loss_d = self.loss_d_real + self.loss_d_fake
    self.loss_g = tf.reduce_mean(ops.scewl(logits=self.D_fake_logits,
                                           labels=tf.ones_like(self.D_fake)), name='loss_g_op')
    self.loss_z = (tf.log(1 - self.D_fake, name='loss_z')) # log(1 − D(G(z)))
    # alternative losses:
    # self.loss_d_real = tf.log(self.D_real)
    # self.loss_d_fake = tf.log(1. - self.D_fake)
    # self.loss_d = -tf.reduce_mean(self.loss_d_real + self.loss_d_fake)
    # self.loss_g = -tf.reduce_mean(tf.log(self.D_fake))
    # define summary stats
    # -------------------
    self.summ_D_real = tf.summary.histogram("D_real", self.D_real)
    self.summ_D_fake = tf.summary.histogram("D_fake", self.D_fake)
    self.summ_G = tf.summary.image("G", tf.reshape(self.G,
        [self.config.batch_size, self.data.h_dim, self.data.w_dim, -1]))
    self.summ_loss_g = tf.summary.scalar("loss_g", self.loss_g)
    self.summ_loss_d = tf.summary.scalar("loss_d", self.loss_d)
    self.summ_loss_d_real = tf.summary.scalar("loss_d_real", self.loss_d_real)
    self.summ_loss_d_fake = tf.summary.scalar("loss_d_fake", self.loss_d_fake)
    self.summ_image = tf.summary.histogram("images", self.x)
    self.summ_label = tf.summary.histogram("labels", self.y)
    self.summ_z = tf.summary.histogram("zs", self.z)
    # setup trainable: split variables between the two optimizers by the
    # 'd_'/'g_' name prefixes used in the layer scopes
    # -------------------
    t_vars = tf.trainable_variables()
    self.d_vars = [var for var in t_vars if 'd_' in var.name]
    self.g_vars = [var for var in t_vars if 'g_' in var.name]
    # a few more initializations
    # -------------------
    # directories for logging the training
    self.train_log_dir = os.path.join(self.config.log_dir, self.config.run_dir)
    self.train_samp_dir = os.path.join(self.config.samp_dir, self.config.run_dir)
    self.train_chkp_dir = os.path.join(self.config.chkp_dir, self.config.run_dir)
    self.saver = tf.train.Saver()
def generator(self, _z, _labels, is_training, batch_norm=False, scope_name='gener'):
    """Build the label-conditioned generator network.

    Two fully-connected layers followed by two transposed convolutions,
    with the one-hot labels concatenated at every layer; sigmoid output in
    [0, 1] named 'g_prob' (looked up by name by the painters).

    Args:
        _z: (batch, z_dim) latent input placeholder.
        _labels: (batch, y_dim) one-hot label placeholder.
        is_training: bool placeholder gating batch norm.
        batch_norm: enable batch norm on the hidden layers.
        scope_name: variable scope for the generator weights.

    Returns:
        Tensor of generated images, shape (batch, h_dim, w_dim, c_dim).
    """
    print(' [*] Building generator...')
    with tf.variable_scope(scope_name) as scope:
        _batch_size = tf.shape(_z)[0] # dynamic batch size op
        with tf.control_dependencies([_batch_size]):
            # output spatial size and its /2, /4 intermediates for upsampling
            s_h, s_w = self.data.h_dim, self.data.w_dim
            s_h2, s_h4 = int(s_h/2), int(s_h/4)
            s_w2, s_w4 = int(s_w/2), int(s_w/4)
            # give an identity to the gener input (named hook for painters)
            _z = tf.identity(_z, name="g_in")
            # reshape the labels for concatenation to feature axis of conv tensors
            _labels_r = tf.reshape(_labels, [_batch_size, 1, 1, self.y_dim])
            # fully connected, layer 0
            g_c0 = ops.condition_concat([_z, _labels], axis=1, name='g_cat0')
            g_h0 = ops.linear_layer(g_c0, self.config.gfc_dim,
                                    is_training=is_training,
                                    scope='g_h0', batch_norm=batch_norm)
            g_h0 = tf.nn.relu(g_h0)
            # fully connected, layer 1 (sized for the /4 spatial reshape)
            g_c1 = ops.condition_concat([g_h0, _labels], axis=1, name='g_cat1')
            g_h1 = ops.linear_layer(g_c1, self.config.gf_dim*2*s_h4*s_w4,
                                    is_training=is_training,
                                    scope='g_h1', batch_norm=batch_norm)
            g_h1 = tf.nn.relu(g_h1)
            # deconvolution, layer 2: /4 -> /2 spatial
            g_r2 = tf.reshape(g_h1, [_batch_size, s_h4, s_w4, self.config.gf_dim * 2])
            g_c2 = ops.condition_conv_concat([g_r2, _labels_r], axis=3,
                                             name='g_cat2')
            g_h2 = ops.conv2dT_layer(g_c2, [_batch_size, s_h2, s_w2, self.config.gf_dim * 2],
                                     is_training=is_training,
                                     scope='g_h2', batch_norm=batch_norm)
            g_h2 = tf.nn.relu(g_h2)
            # deconvolution, layer 3: /2 -> full spatial, no batch norm on output
            g_c3 = ops.condition_conv_concat([g_h2, _labels_r], axis=3,
                                             name='g_cat3')
            g_h3 = ops.conv2dT_layer(g_c3, [_batch_size, s_h, s_w, self.data.c_dim],
                                     is_training=is_training,
                                     scope='g_h3', batch_norm=False)
            # named output hook, image values squashed to [0, 1]
            g_prob = tf.nn.sigmoid(g_h3, name='g_prob')
    return g_prob
def discriminator(self, _images, _labels, is_training,
                  reuse=False, batch_norm=False, minibatch=False):
    """Build the label-conditioned discriminator network.

    Two strided convolutions followed by two fully-connected layers, with
    one-hot labels concatenated at every layer and optional minibatch
    discrimination before the final layer.

    Args:
        _images: (batch, h, w, c) image tensor (real data or G output).
        _labels: (batch, y_dim) one-hot label tensor.
        is_training: bool placeholder gating batch norm.
        reuse: reuse the 'discr' variable scope (True for the fake branch
            so both branches share weights).
        batch_norm: enable batch norm on the hidden layers.
        minibatch: insert a minibatch-discrimination layer.

    Returns:
        (d_prob, d_h3): sigmoid probability and its pre-sigmoid logits.
    """
    print(' [*] Building discriminator...')
    # flattened size after two stride-2 convs (spatial /4), including the
    # concatenated label channels
    flat_shape = int( (self.data.w_dim / 4)**2 * (self.config.df_dim + self.data.n_categories) )
    with tf.variable_scope('discr') as scope:
        if reuse:
            scope.reuse_variables()
        # reshape the labels for concatenation to feature axis of conv tensors
        _labels_r = tf.reshape(_labels, [-1, 1, 1, self.y_dim])
        # convolution, layer 0 (no batch norm on the input layer)
        d_c0 = ops.condition_conv_concat([_images, _labels_r], axis=3,
                                         name='d_cat0')
        # NOTE(review): self.data.y_dim — elsewhere this class uses
        # self.y_dim (set from self.data.n_categories); confirm the data
        # provider actually exposes y_dim.
        d_h0 = ops.conv2d_layer(d_c0, self.data.c_dim + self.data.y_dim,
                                k_h=5, k_w=5, d_h=2, d_w=2,
                                scope='d_h0', batch_norm=False)
        d_h0 = tf.nn.leaky_relu(d_h0, alpha=self.config.alpha)
        # convolution, layer 1
        d_c1 = ops.condition_conv_concat([d_h0, _labels_r], axis=3,
                                         name='d_cat1')
        d_h1 = ops.conv2d_layer(d_c1, self.config.df_dim + self.y_dim,
                                is_training=is_training,
                                k_h=5, k_w=5, d_h=2, d_w=2,
                                scope='d_h1', batch_norm=batch_norm)
        d_h1 = tf.nn.leaky_relu(d_h1, alpha=self.config.alpha)
        # fully connected, layer 2
        d_r2 = tf.reshape(d_h1, [-1, flat_shape])
        d_c2 = ops.condition_concat([d_r2, _labels], axis=1,
                                    name='d_cat2')
        d_h2 = ops.linear_layer(d_c2, self.config.dfc_dim,
                                is_training=is_training,
                                scope='d_h2', batch_norm=batch_norm)
        d_h2 = tf.nn.leaky_relu(d_h2, alpha=self.config.alpha)
        # minibatch discrim, optional layer (combats mode collapse)
        if minibatch:
            d_h2 = ops.minibatch_discriminator_layer(d_h2, num_kernels=5, kernel_dim=3)
        # fully connected, layer 3: single logit, no batch norm
        d_c3 = ops.condition_concat([d_h2, _labels], axis=1,
                                    name='d_cat3')
        d_h3 = ops.linear_layer(d_c3, 1,
                                is_training=False,
                                scope='d_h3', batch_norm=False)
        # 'discr_1/Sigmoid:0' on the reused branch is looked up by name
        # by the painters
        d_prob = tf.nn.sigmoid(d_h3)
    return d_prob, d_h3
def train(self):
    """Run the adversarial training loop for `config.epoch` epochs.

    Builds Adam solvers for D and G, initializes variables and summary
    writers, then for each batch performs one D update followed by
    `config.gener_iter` G updates. Periodically writes sample figures
    (every 20 steps) and checkpoints (every 500 steps).
    """
    print(' [*] Beginning training...')
    # solvers:
    # -------------------
    d_optim = tf.train.AdamOptimizer(self.config.learning_rate,
                                     beta1=self.config.beta1) \
                      .minimize(self.loss_d, var_list=self.d_vars)
    g_optim = tf.train.AdamOptimizer(self.config.learning_rate,
                                     beta1=self.config.beta1) \
                      .minimize(self.loss_g, var_list=self.g_vars)
    # initialize all variables
    # (a zero z batch only satisfies placeholder dependencies during init)
    z_batch = np.zeros(([self.config.batch_size, self.config.z_dim]))
    self.sess.run(tf.global_variables_initializer(), feed_dict={self.z: z_batch})
    # initialize summary variables recorded during training
    self.summ_g = tf.summary.merge([self.summ_D_fake, self.summ_G,
                                    self.summ_loss_d_fake, self.summ_loss_g])
    self.summ_d = tf.summary.merge([self.summ_D_real, self.summ_loss_d_real,
                                    self.summ_loss_d])
    self.summ_input = tf.summary.merge([self.summ_image, self.summ_label,
                                        self.summ_z])
    self.writer = tf.summary.FileWriter(self.train_log_dir, self.sess.graph)
    # set of training random z and label tensors for training gifs
    self.training_zs, self.training_labels = utils.training_sample_set(
                                                 self.config.z_dim,
                                                 self.data.n_categories)
    cnt = 0
    start_time = time.time()
    print(" Start time: ", time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start_time)))
    # finalize to make sure no more ops are added!
    # self.sess.graph.finalize()
    for epoch in np.arange(self.config.epoch):
        for batch in np.arange(self.data.n_batches):
            # grab the new batch:
            # -------------------
            _image_batch, _label_batch = self.sess.run(self.data.next_batch)
            z_batch = np.random.uniform(-1, 1, [self.config.batch_size, self.config.z_dim]) \
                        .astype(np.float32)
            # copy to prevent consumption during training
            image_batch = _image_batch.copy()
            label_batch = _label_batch.copy()
            # optional augmentation
            if self.config.noisy_inputs:
                image_batch = image_batch + 1 * np.random.normal(0, 0.1, size=image_batch.shape)
            if self.config.flip_inputs:
                image_batch = 1 - image_batch
            # update networks:
            # -------------------
            # update D network (one step per batch)
            _, summary_str = self.sess.run([d_optim, self.summ_d],
                                           feed_dict={self.x: image_batch,
                                                      self.y: label_batch,
                                                      self.z: z_batch,
                                                      self.is_training: True})
            self.writer.add_summary(summary_str, cnt)
            # update G network (gener_iter steps per D step)
            for g in np.arange(self.config.gener_iter):
                _, summary_str = self.sess.run([g_optim, self.summ_g],
                                               feed_dict={self.z: z_batch,
                                                          self.y: label_batch,
                                                          self.is_training: True})
                self.writer.add_summary(summary_str, cnt)
            # calculate new errors for printing (inference mode)
            self.err_D_fake = self.loss_d_fake.eval({ self.z: z_batch,
                                                      self.y: label_batch,
                                                      self.is_training: False })
            self.err_D_real = self.loss_d_real.eval({ self.x: image_batch,
                                                      self.y: label_batch,
                                                      self.is_training: False })
            self.err_G = self.loss_g.eval({ self.z: z_batch,
                                            self.y: label_batch,
                                            self.is_training: False })
            # make records and samples:
            # -------------------
            # sample interval
            if cnt % 20 == 0:
                self.train_sampler(self.training_zs, _labels=self.training_labels,
                                   train_time=[epoch, batch], samp_dir=self.train_samp_dir)
            # record chkpt
            if np.mod(cnt, 500) == 2:
                self.saver.save(self.sess,
                                os.path.join(self.train_chkp_dir, 'StratGAN'),
                                global_step=cnt)
            # print the current training state
            cnt += 1
            print(" Epoch: [%2d/%2d] [%4d/%4d] time: %4.4f, d_loss: %.6f, g_loss: %.6f" \
                % (epoch+1, self.config.epoch, batch+1, self.data.n_batches,
                   time.time() - start_time, self.err_D_fake+self.err_D_real, self.err_G))
    # debugging memory leaking:
    # -------------------
    # objList = muppy.get_objects()
    # my_types = muppy.filter(objList, Type=(list))
    # sum1 = summary.summarize(objList)
    # summary.print_(sum1)
    # loadersize = utils.getsize(self.data)
    # print('loadersize:', loadersize)
    # loadersize = asizeof.asizeof(self.data)
    # print('loadersize:', loadersize)
    # loadersize = asizeof.asizeof(self)
    # print('self', loadersize)
    # for obj in gc.get_objects():
    #     if isinstance(obj, list):
    #         print(obj)
def train_sampler(self, z, _labels=None, train_time=None, samp_dir='samp'):
    """Generate a grid of samples from the current generator and save a png.

    Parameters
    ----------
    z : latent vectors fed to the generator
    _labels : one-hot labels; fed both to G and to the label decoder
    train_time : [epoch, batch] used to name the file; wall-clock
        timestamp used instead when None/falsy
    samp_dir : directory to save the figure into
    """
    if not train_time:
        # stand-alone sample: name by wall-clock timestamp
        train_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
        samp_name = 'g_{0}.png'.format(train_time)
    else:
        epoch = train_time[0]
        batch = train_time[1]
        samp_name = 'g_{0}_{1}.png'.format(str(epoch+1).zfill(3),
                                           str(batch).zfill(4))
    # run G and the label decoder in one pass; decoded labels annotate the plot
    samples, decoded = self.sess.run([self.G, self.decoder],
                                     feed_dict={self.z: z,
                                                self.y: _labels,
                                                self.encoded: _labels,
                                                self.is_training: False})
    fig = utils.plot_images(samples, image_dim=self.data.h_dim,
                            n_categories=self.data.n_categories,
                            labels=decoded)
    file_name = os.path.join(samp_dir, samp_name)
    plt.savefig(file_name, bbox_inches='tight')
    plt.close(fig)
    print("Sample: {file_name}".format(file_name=file_name))
def load(self, checkpoint_dir):
    """Restore the newest checkpoint found in `checkpoint_dir`.

    Returns
    -------
    (success, counter) : (bool, int)
        success is True when a checkpoint was restored; counter is the
        global step parsed from the checkpoint file name (0 on failure).
    """
    import re
    print(" [*] Reading checkpoints...")
    ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
    if ckpt and ckpt.model_checkpoint_path:
        ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
        self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
        # raw string for the regex: "\d" in a plain literal is an invalid
        # escape (DeprecationWarning now, SyntaxError in future Python).
        # Pattern grabs the last run of digits in the name (the step count).
        counter = int(next(re.finditer(r"(\d+)(?!.*\d)", ckpt_name)).group(0))
        print(" [*] Success to read {}".format(ckpt_name))
        return True, counter
    else:
        print(" [*] Failed to find a checkpoint")
        return False, 0
def paint(self, pconfig):
    """Fill a large canvas by quilting generator patches ("painting").

    Parameters
    ----------
    pconfig : painting configuration object (patcher type, canvas size,
        overlap settings, ground-truth options, output file root)
    """
    print(" [*] beginning painting routines")
    # directories for logging the painting
    self.paint_samp_dir = os.path.join(self.config.paint_dir, self.config.run_dir)
    self.out_data_dir = os.path.join(self.config.out_dir, self.config.run_dir)
    self.pconfig = pconfig
    self.pconfig.out_data_dir = self.out_data_dir
    # initialize the painter object
    if self.pconfig.patcher == 'efros':
        self.painter = paint.EfrosPainter(self, paint_label=self.pconfig.label,
                                          canvas_width=self.pconfig.width,
                                          canvas_height=self.pconfig.height,
                                          patch_overlap=self.pconfig.overlap,
                                          patch_overlap_threshold=self.pconfig.overlap_thresh)
    elif self.pconfig.patcher == 'context':
        self.painter = paint.ContextPainter(self, paint_label=self.pconfig.label,
                                            canvas_width=self.pconfig.width,
                                            canvas_height=self.pconfig.height,
                                            patch_overlap=self.pconfig.overlap,
                                            patch_overlap_threshold=self.pconfig.overlap_thresh,
                                            batch_dim=40)
    # NOTE(review): no else branch -- an unrecognized patcher value leaves
    # self.painter unset and fails below with AttributeError; confirm whether
    # an explicit raise is preferred here
    # add a ground truth object
    if self.pconfig.groundtruth:
        if self.pconfig.groundtruth_type == 'core':
            groundtruth = gt.GroundTruthCores(pconfig=self.pconfig,
                                              painter_canvas=self.painter.canvas,
                                              n_cores=self.pconfig.n_cores)
        else:
            raise ValueError('bad groundtruth value')
        self.painter.add_groundtruth(groundtruth)
    else:
        self.painter.groundtruth = False
    # sample now initialized; record the blank/seeded canvas
    if self.pconfig.savefile_root:
        self.painter.canvas_plot(self.pconfig.savefile_root+'_canvas_initial.png',
                                 verticies=True, cmap='gray_r')
    # main fill operation
    self.painter.fill_canvas()
    # sample now filled; record the completed canvas
    if self.pconfig.savefile_root:
        self.painter.canvas_plot(self.pconfig.savefile_root+'_canvas_final.png')
    if self.pconfig.savefile_root and True:
        # output the canvas to a numpy array for post-analysis scripts
        np.save(os.path.join(self.out_data_dir, self.pconfig.savefile_root+'_canvas_final.npy'),
                self.painter.canvas)
def post_sampler(self, linear_interp=False, label_interp=False,
                 random_realizations=False, context_loss=False):
    """Post-training sampling routines; each flag enables one experiment.

    Parameters
    ----------
    linear_interp : int-ish; if truthy, number of *legs* of linear
        interpolation between random z points (samples saved as pngs)
    label_interp : if truthy, interpolate across labels at a fixed z
    random_realizations : if truthy, draw 100 random samples at label 3
    context_loss : accepted but unused in this routine
    """
    print(" [*] beginning post sampling routines")
    self.post_samp_dir = os.path.join(self.config.post_dir, self.config.run_dir)
    if linear_interp:
        """if >0, the number of *legs* of interpolation to do"""
        ninterp = int(linear_interp)
        nsamp = 100
        label = 3
        print(" [*] beginning linear interp between {0} points with {1} samples".format(ninterp+1, nsamp))
        pts = np.random.uniform(-1, 1, [ninterp+1, self.config.z_dim]) \
                .astype(np.float32)
        if ninterp >= 2:
            # close the loop: last anchor equals the first
            pts[-1, :] = pts[0, :]
        mat = np.zeros((nsamp*ninterp, self.config.z_dim), np.float32)
        for i in np.arange(ninterp):
            for j in np.arange(self.config.z_dim):
                # per-dimension linear ramp between consecutive anchors
                v = np.linspace(pts[i, j], pts[i+1, j], num=nsamp, dtype=np.float32)
                # print(v)
                mat[i*nsamp:i*nsamp+nsamp, j] = v.transpose()
        # NOTE(review): `label * np.zeros(...)` is a no-op (result is all
        # zeros regardless of label); presumably np.zeros alone was intended
        lab = label * np.zeros((nsamp*ninterp, self.data.n_categories), np.float32)
        lab[:, label] = 1
        for i in np.arange(nsamp*ninterp):
            sample = self.sess.run(self.G,
                                   feed_dict={self.z: mat[i, :].reshape(1, self.config.z_dim),
                                              self.y: lab[i, :].reshape(1, self.data.n_categories),
                                              self.is_training: False})
            fig, ax = plt.subplots()
            plt.axis('off')
            ax.set_xticklabels([])
            ax.set_yticklabels([])
            ax.set_aspect('equal')
            plt.imshow(sample.squeeze(), cmap='Greys_r')
            file_name = os.path.join(self.post_samp_dir, '%04d.png' % i)
            plt.savefig(file_name, bbox_inches='tight', dpi=200)
            plt.close(fig)
            print("Sample: {file_name}".format(file_name=file_name))
    if label_interp:
        """if true, do a linear interpolation across the labels for constant random vect"""
        nlabels = self.data.n_categories
        nsamp = 10
        print(" [*] beginning label interp")
        z = np.random.uniform(-1, 1, [1, self.config.z_dim]).astype(np.float32)
        lab = np.zeros((nsamp*nlabels, nlabels), np.float32)
        print("lab_shape:", lab.shape)
        for i in np.arange(nlabels):
            # ramp each label's weight from 0.1 to 1 over nsamp rows
            v = np.linspace(0.1, 1, num=nsamp, dtype=np.float32)
            print(v)
            lab[i*nsamp:i*nsamp+nsamp, i] = v.transpose()
        for i in np.arange(nsamp*nlabels):
            sample = self.sess.run(self.G,
                                   feed_dict={self.z: z,
                                              self.y: lab[i, :].reshape(1, self.data.n_categories),
                                              self.is_training: False})
            fig, ax = plt.subplots()
            plt.axis('off')
            ax.set_xticklabels([])
            ax.set_yticklabels([])
            ax.set_aspect('equal')
            plt.imshow(sample.squeeze(), cmap='Greys_r')
            file_name = os.path.join(self.post_samp_dir, '%04d.png' % i)
            plt.savefig(file_name, bbox_inches='tight', dpi=200)
            plt.close(fig)
            print("Sample: {file_name}".format(file_name=file_name))
    if random_realizations:
        """if true make n random realizations"""
        nrand = 100
        label = 3
        print(" [*] beginning {0} random realizations".format(nrand))
        pts = np.random.uniform(-1, 1, [nrand, self.config.z_dim]) \
                .astype(np.float32)
        lab = np.zeros((nrand, self.data.n_categories), np.float32)
        lab[:, label] = 1
        for i in np.arange(nrand):
            sample = self.sess.run(self.G,
                                   feed_dict={self.z: pts[i, :].reshape(1, self.config.z_dim),
                                              self.y: lab[i, :].reshape(1, self.data.n_categories),
                                              self.is_training: False})
            fig, ax = plt.subplots()
            plt.axis('off')
            ax.set_xticklabels([])
            ax.set_yticklabels([])
            ax.set_aspect('equal')
            plt.imshow(sample.squeeze(), cmap='Greys_r')
            file_name = os.path.join(self.post_samp_dir, '%04d.png' % i)
            plt.savefig(file_name, bbox_inches='tight', dpi=200)
            plt.close(fig)
            print("Sample: {file_name}".format(file_name=file_name))
|
amoodie/StratGAN | post_analysis/mean_map.py | <reponame>amoodie/StratGAN
import numpy as np
import matplotlib.pyplot as plt
import os
import glob
import cv2 as cv
import shutil
def make_overlay(canvas):
    """Build an RGBA overlay highlighting ground-truth cells of `canvas`.

    Finite cells with value 0.0 ("channel") are colored one blue-gray tone;
    all other finite cells ("mud") a lighter tone. NaN cells stay fully
    transparent (alpha 0). Returns an (H, W, 4) float array.
    """
    known = np.isfinite(canvas)
    is_channel = np.logical_and(known, canvas == 0.0)
    is_mud = np.logical_and(known, np.invert(canvas == 0.0))
    overlay = np.zeros((canvas.shape[0], canvas.shape[1], 4))
    # per-band 8-bit color values for (channel, mud), scaled to [0, 1]
    band_colors = [(61, 177), (116, 196), (178, 231)]
    for band, (channel_val, mud_val) in enumerate(band_colors):
        overlay[is_channel, band] = channel_val / 255
        overlay[is_mud, band] = mud_val / 255
    # 80% opacity wherever ground truth exists
    overlay[known, 3] = 0.8
    return overlay
def process_to_numpix(canvas):
    """Find contours of the (binary-valued) canvas with OpenCV.

    Returns the (im2, contours, hierarchy) triple from cv.findContours.
    NOTE(review): the 3-value return is the OpenCV 3.x API; OpenCV 4.x
    returns only (contours, hierarchy) -- confirm the pinned cv2 version.
    """
    # cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
    #     cv2.CHAIN_APPROX_SIMPLE)
    # cnts = imutils.grab_contours(cnts)
    # findContours requires an 8-bit single-channel image
    canvas = canvas.astype(np.uint8)
    im2, contours, hierarchy = cv.findContours(canvas, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    return im2, contours, hierarchy
def compute_area_index(canvas, contours, index):
    """Return the pixel area of the contour selected by `index`.

    Parameters
    ----------
    canvas : source array the contours were extracted from (unused here,
        kept for interface compatibility)
    contours : contour list as returned by cv.findContours
    index : which contour to measure

    Returns
    -------
    area : float, contour area in pixels (cv.contourArea)
    """
    # use the requested contour -- `index` was previously ignored and
    # contours[0] was always measured
    cnt = contours[index]
    # area could equivalently come from moments: cv.moments(cnt)['m00'];
    # centroid (if needed) is (m10/m00, m01/m00)
    area = cv.contourArea(cnt)
    return area
def canvas_plot(canvas, groundtruth_canvas_overlay,
                filename, cmap='gray', verticies=False):
    """Save a figure of `canvas` with the ground-truth overlay drawn on top.

    `verticies` is accepted for interface compatibility but not used.
    """
    figure, axis = plt.subplots()
    axis.imshow(canvas, cmap=cmap)
    # semi-transparent RGBA overlay marks the ground-truth cells
    plt.imshow(groundtruth_canvas_overlay)
    axis.axis('off')
    plt.savefig(os.path.join(filename), bbox_inches='tight', dpi=300)
    plt.close()
# Average many painted-canvas realizations into a mean map, and measure the
# flood-filled "reservoir" area of each realization for a size distribution.
# file list
filelist = glob.glob(os.path.join(os.path.pardir, "StratGAN", "out", "line7", "*_final.npy"))
# load ground truth array and convert it to an RGBA overlay
groundtruth=True
groundtruth_canvas_overlay = np.load(os.path.join(os.path.pardir, "StratGAN", "out", "line7", "4trial_groundtruth_canvas.npy"))
groundtruth_canvas_overlay = make_overlay(groundtruth_canvas_overlay)
# open first to size objects
temp = np.load(filelist[0])
mean_array = np.zeros(temp.shape)
temp = None # clear ref
# loop to average
cp_cnt = 0
med_size = np.zeros((len(filelist)))
for i in np.arange(len(filelist)):
    # grab ith realization
    ith = np.load(filelist[i])
    # compute flood fill
    # find area flooded from four hard-coded seed points inside the
    # known channel (one mask layer per seed)
    mask_in = np.zeros((ith.shape[0]+2,ith.shape[1]+2),np.uint8)
    corner_idxs = np.array([[210, 123], [217, 123], [210, 144], [217, 144]])
    mask_size = np.zeros((ith.shape[0]+2, ith.shape[1]+2, corner_idxs.shape[0]))
    # print(corner_idxs.shape)
    for j in np.arange(corner_idxs.shape[0]):
        # print(corner_idxs[1])
        num, im, mask_size[:,:,j], rect = cv.floodFill(ith.astype(np.uint8), mask_in,
                                                       (corner_idxs[j][0], corner_idxs[j][1]), 255)
    # flooded pixel count per seed
    sum_list = np.sum(np.sum(mask_size==1,0),0)
    # accept the realization only when all four seeds flooded the same
    # region and the region is smaller than 8000 px
    if np.all(np.median(sum_list) == sum_list) and np.median(sum_list) < 8000:
        med_size[i] = np.median(sum_list)
    else:
        med_size[i] = np.nan
    # make the mean map and image for gif
    if not np.isnan(med_size[i]):
        # NOTE(review): a standard incremental mean is (mean*i + ith)/(i+1);
        # this uses (mean*(i+1) + ith)/(i+1), which overweights the running
        # mean -- confirm intent
        mean_array = ( (mean_array*(i+1)) + ith ) / (i + 1)
        # make a figure and save it for giffing (currently disabled)
        if False:
            canvas_plot(ith, groundtruth_canvas_overlay,
                        filename=os.path.join("elite", str(cp_cnt).zfill(4)+'.png'))
            # shutil.copy(filelist[i], "elite_"+str(cp_cnt).zfill(3)+'.png')
            cp_cnt += 1
# plot the mean map with the ground-truth overlay
fig, ax = plt.subplots()
samp = ax.imshow(mean_array, cmap="gray")
if groundtruth:
    plt.imshow(groundtruth_canvas_overlay)
ax.axis('off')
plt.savefig(os.path.join("mean_array_map.png"), bbox_inches='tight', dpi=300)
plt.close()
# kernel-density estimate of the reservoir-area distribution
med_size = med_size[~np.isnan(med_size)]
from scipy import stats
# data = [1.5]*7 + [2.5]*2 + [3.5]*8 + [4.5]*3 + [5.5]*1 + [6.5]*8
density = stats.kde.gaussian_kde(med_size)
x = np.arange(4000, 8000, 10)
vect = density(x)
# percentile locations (P10/P50/P90) read off the KDE's CDF
ps = np.array([0.1, 0.5, 0.9])
Ps = np.interp(ps, np.cumsum(vect/np.sum(vect)), x)
Ys = np.interp(Ps, x, vect)
PsFull = np.hstack((4000, Ps, 8000)) # add on the extremes for looping
colset = [(247/255,245/255,113/255),(244/255,153/255,113/255),
          (244/255,153/255,113/255),(247/255,245/255,113/255)]
fig, ax = plt.subplots()
plt.hist(med_size, range=(4000, 8000), density=True, color='lightgray', alpha=1) #, edgecolor='black', linewidth=1.2)
ax.set_xlabel("reservoir area (px)")
ax.set_ylabel("density")
plt.savefig(os.path.join("example_size_dist.png"), bbox_inches='tight', dpi=300)
# shade the inter-percentile bands, then mark each percentile line
for i in np.arange(Ps.size+1):
    plt.fill_between(x[np.logical_and(x>=PsFull[i], x<=PsFull[i+1])], 0,
                     vect[np.logical_and(x>=PsFull[i], x<=PsFull[i+1])],
                     color=colset[i], zorder=2, alpha=0.8)
for i in np.arange(Ps.size):
    plt.vlines(Ps[i], 0, Ys[i])
    plt.text(Ps[i]+50, 0.0002, "P"+str((ps[i]*100).astype(np.uint16)))
plt.plot(x, density(x), color='k')
plt.savefig(os.path.join("example_size_dist_density.png"), bbox_inches='tight', dpi=300)
plt.close()
amoodie/StratGAN | StratGAN/datagen.py | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.cm as cm
import cv2
import tensorflow as tf
class StratHeteroProvider(object):
    """Wraps StratHeteroGenerator in a tf.data pipeline.

    Exposes `next_batch` (a get_next op from a one-shot iterator) plus the
    same metadata attributes (h_dim, w_dim, c_dim, n_categories, y_dim,
    n_batches) that the file-based ImageDatasetProvider exposes, so the
    model code can consume either interchangeably.
    """
    def __init__(self, batch_size=64):
        # underlying infinite python generator of (image, one_hot_label)
        self.data_generator = StratHeteroGenerator(n_categories=None)
        self.data = tf.data.Dataset.from_generator(lambda: self.data_generator,
                                                   (tf.float32, tf.float32),
                                                   (tf.TensorShape((64, 64, 1)),
                                                    tf.TensorShape((self.data_generator.n_categories))))
        # self.data = self.data.map(self._parse_function, num_parallel_calls=num_threads)
        # self.data = self.data.prefetch(prefetch_buffer)
        self.batch_size = batch_size
        self.data = self.data.batch(self.batch_size)
        # create iterator and final input tensors
        self.iterator = self.data.make_one_shot_iterator()
        # self.image_batch, self.label_batch = self.iterator.get_next()
        self.next_batch = self.iterator.get_next()
        # self.data = self.data.prefetch(1)
        # NOTE(review): these set attributes directly on the tf Dataset
        # object to mirror the loader.ImageDatasetProvider interface --
        # unusual but it works; image size here is fixed at 64x64x1
        self.data.h_dim = self.data.w_dim = 64
        self.h_dim, self.w_dim = self.data.h_dim, self.data.w_dim
        self.c_dim = self.data.c_dim = 1
        self.data.n_categories = self.n_categories = self.data_generator.n_categories
        self.data.y_dim = self.y_dim = self.n_categories
        self.data_shape = [self.batch_size, self.h_dim, self.w_dim, self.c_dim]
        # synthetic data is endless; fixed notional epoch length
        self.n_batches = 50
class StratHeteroGenerator(object):
    """Infinite generator of synthetic "strike section" training images.

    Each sample is a (height, width, 1) float image containing a random
    number of rectangular "channels" filled with bed-scale heterogeneity,
    paired with a one-hot label encoding the channel count.
    """
    def __init__(self, width=64, height=64, nc_min=1, nc_max=10,
                 cw_mu=30, cw_sig=10, ch_mu=6, ch_sig=1,
                 batch_size=64, n_categories=None):
        """
        Inputs:
            width   : image width (px)
            height  : image height (px)
            nc_min  : min number of channels per image
            nc_max  : max number of channels per image
            cw_mu   : channel width mean (px)
            cw_sig  : channel width std (px)
            ch_mu   : channel height mean (px)
            ch_sig  : channel height std (px)
            batch_size   : nominal batch size (informational only)
            n_categories : label vector length; defaults to nc_max
        """
        ## set up the variables
        # channel scale heterogeneity follows a discrete uniform distribution
        self.nc_min = nc_min  # min number of channels
        self.nc_max = nc_max  # max number of channels
        # bed scale heterogeneity follows a truncated normal distribution (0,1)
        self.P_b_mu = 0       # mean of draw
        self.P_b_sig = 0.25   # std of dist
        # channel size dists
        self.cw_mu = cw_mu    # width mean
        self.ch_mu = ch_mu    # height mean
        self.cw_sig = cw_sig  # width std
        self.ch_sig = ch_sig  # height std
        self.nx = width       # cols
        self.ny = height      # rows
        self.empty = np.zeros((self.nx, self.ny))  # template strike section array
        # other info
        self.batch_size = batch_size
        self.n_categories = n_categories if n_categories else self.nc_max
        # list of distortion functions to pick from
        self.dist_func = [self.random_gaussian_blur,
                          self.random_brightness_contrast,
                          self.random_noise]

    def __iter__(self):
        return self

    def __next__(self):
        """Return one (image, one_hot_label) sample.

        image is (ny, nx, 1) float; one_hot_label is (n_categories,)
        float32 with a 1 at index (n_channels - 1).
        """
        strk = self.empty.copy()
        nc = np.random.randint(low=self.nc_min, high=self.nc_max+1)  # number of channels
        # per-image std of the bed-scale heterogeneity draws
        img_P_b_sig = np.abs(np.random.normal(loc=self.P_b_mu, scale=self.P_b_sig)) + 1e-6
        for c in np.arange(nc):
            # channel size; builtin int(): np.int was removed in NumPy 1.24
            cw = int(np.round(np.random.normal(loc=self.cw_mu, scale=self.cw_sig)))
            ch = int(np.round(np.random.normal(loc=self.ch_mu, scale=self.ch_sig)))
            cw, ch = np.clip(cw, 0, self.nx-1), np.clip(ch, 0, self.ny-1)
            cl_w = np.random.randint(low=0, high=self.nx-cw)  # channel lower left x
            cl_h = np.random.randint(low=0, high=self.ny-ch)  # channel lower left y
            # channel heterogeneity matrix, clipped so values stay in [0.5, 1]
            c_het = np.abs(np.random.normal(loc=self.P_b_mu, scale=img_P_b_sig, size=(ch, cw)))
            c_het = np.clip(c_het, 0, 0.5)
            strk[cl_h:cl_h+ch, cl_w:cl_w+cw] = 0.5 + c_het
        # optional distortion (disabled):
        # if np.random.uniform() < 0.5:
        #     strk = self.random_gaussian_blur(strk)
        # one-hot label from the channel count
        one_hot_label = np.zeros((self.n_categories), dtype=np.float32)
        one_hot_label[nc - 1] = 1
        return np.expand_dims(strk, 2), one_hot_label

    def random_gaussian_blur(self, image):
        """Blur with a random odd kernel size drawn from {1, 3}."""
        k_size = np.random.choice(np.arange(1, 5, 2))
        image = cv2.GaussianBlur(image, (k_size, k_size), 0)
        return np.clip(image, 0, 1)

    def random_brightness_contrast(self, image):
        """Randomly re-scale contrast and shift brightness, clipped to [0, 1]."""
        # np.random instead of the stdlib `random` module, which was never
        # imported here and made these distortions raise NameError when called
        brightness = 0.25 + np.random.random() / 2
        contrast = 0.25 + np.random.random() / 2
        image = contrast * (image - np.mean(image)) / np.std(image) + brightness
        return np.clip(image, 0, 1)

    def random_noise(self, image):
        """Add small gaussian pixel noise, clipped to [0, 1]."""
        noise_var = np.random.random() / 20
        noise = np.random.randn(image.shape[0], image.shape[1]) * noise_var
        image = image + noise
        return np.clip(image, 0, 1)
if __name__ == '__main__':
    # smoke test: draw a batch worth of samples and write them out as pngs
    dg = StratHeteroProvider(batch_size=50)
    for i in np.arange(dg.batch_size):
        # NOTE(review): StratHeteroProvider defines no __iter__/__next__, and
        # the underlying generator yields an (image, label) tuple -- this
        # next(dg) call looks broken as written; likely should be
        # next(dg.data_generator) with tuple unpacking. Confirm.
        img = next(dg)
        cv2.imwrite("imgs/{0}.png".format(str(i).zfill(3)), img * 255)
        # cv2.imwrite("mask.png", (input_mask * 255).astype(np.uint8))
        # cv2.imwrite("width_map.png", width_map)
        # add it to the plot
        # ax = plt.subplot(gs[i])
        # strk_cnv = ax.imshow(strk, cmap='gray_r')
        # current_cmap = cm.get_cmap()
        # current_cmap.set_bad(color='white')
        # strk_cnv.set_clim(0, 1)
        # ax.set_xticks([])
        # ax.set_yticks([])
amoodie/StratGAN | process_images/make_demo_fig.py | import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
from PIL import Image
import numpy as np
# Build a labeled grid figure from the demo tiles in cut_images_demo/.
filelist = [file for file in os.listdir('cut_images_demo') if file.endswith('.png')]
filelist.sort()
gd = (6, 3) # grid image dimensions (rows, cols)
fig = plt.figure(figsize=(gd[1], gd[0]))
gs = gridspec.GridSpec(gd[0], gd[1])
gs.update(wspace=0.05, hspace=0.05)
# three examples per class label 0..5, paired with the sorted file list
labels = np.repeat([0, 1, 2, 3, 4, 5], 3)
print("labels:", labels)
print("filelist:", filelist)
for i, (file, label) in enumerate(zip(filelist, labels)):
    image = Image.open(os.path.join('cut_images_demo', file))
    ax = plt.subplot(gs[i])
    # annotate each tile with its class label
    ax.text(0.8, 0.8, str(label),
            backgroundcolor='white', transform=ax.transAxes)
    plt.axis('off')
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    ax.set_aspect('equal')
    plt.imshow(image, cmap='gray')
plt.savefig('input_demo.png', bbox_inches='tight', dpi=400)
|
amoodie/StratGAN | StratGAN/loader.py | """
image serving classes for model
"""
import glob
import numpy as np
from PIL import Image
import os
import utils
import tensorflow as tf
class BaseImageProvider(object):
    """
    This class provides a basis for generating a tf.data.Dataset and Iterator.
    It takes some hints from the UNet implementation in tf.
    """
    def __init__(self, image_dir, image_ext='*.png',
                 c_dim=None, a_min=None, a_max=None, verbose=False):
        """
        Parameters
        ----------
        image_dir : directory containing the labeled training images
        image_ext : glob pattern for image files
        c_dim : number of channels; probed from a test image when None
        a_min, a_max : optional clip range for image values
        verbose : print a dataset summary after init
        """
        self.verbose = verbose
        self.image_dir = image_dir
        self.image_ext = image_ext
        self._image_list = self._list_images(os.path.join(self.image_dir, self.image_ext))
        self._label_list = self._parse_labels(self._image_list)
        self.n_images = len(self._image_list)
        self.n_categories = np.unique(np.array([l for l in self._label_list])).size
        self.y_dim = self.n_categories
        assert len(self._image_list) > 0, "No training files"
        assert len(self._image_list) == len(self._label_list), "Unequal images/labels length"
        self.a_min = a_min if a_min is not None else -np.inf
        # fixed copy-paste bug: this previously tested `a_min` instead of
        # `a_max`, so a_max silently stayed np.inf whenever a_min was None
        self.a_max = a_max if a_max is not None else np.inf
        # probe one image to determine dimensions
        test_img = self.__load_image(self._image_list[0])
        if c_dim is None:
            print('Number of channels (c_dim) not provided, attempting to determine...')
            self.c_dim = test_img.shape[-1]
            print('Tested image had {0} dimension(s)'.format(self.c_dim))
        else:
            self.c_dim = c_dim
        self.w_dim = test_img.shape[0]
        self.h_dim = test_img.shape[1]
        if self.verbose:
            self.print_data_info()

    def _list_images(self, image_dir):
        """Return all paths matching the glob pattern."""
        all_files = glob.glob(image_dir)
        return [name for name in all_files]

    def _parse_labels(self, image_list):
        """
        Split the integer labels out of the image list.

        Expects file names of the form '<label>_<anything>.<ext>'.
        """
        path_splits = [path.split('/') for path in image_list]
        image_names = [split[-1] for split in path_splits]
        label_splits = [label.split('_') for label in image_names]
        labels = [int(split[0]) for split in label_splits]
        return labels

    def __load_image(self, path, dtype=np.float32):
        """
        Single image reader, used on a test image to determine values
        (channels/size) when they are not given explicitly.
        """
        try:
            img_array = np.array(Image.open(path), dtype)
        except (OSError, ValueError):
            # fallback reader; previously referenced the undefined names
            # `image_name` and `cv2`, so it could only raise NameError
            import cv2
            img_array = np.squeeze(cv2.imread(path, cv2.IMREAD_GRAYSCALE))
        return img_array

    def _make_inputs_fig(self, image_list, label_list):
        # placeholder for an inputs-summary figure
        pass

    def print_data_info(self):
        """Print a short summary of the loaded dataset."""
        print(' Image directory: ', self.image_dir)
        print(' Number of images: %s' % self.n_images)
        print(' Categories in labels: ', self.n_categories)
        print(' Image height: ', self.h_dim)
        print(' Image width: ', self.w_dim)
        #
        #
        # add more...
class ImageDatasetProvider(BaseImageProvider):
    """File-backed tf.data pipeline over the labeled images in `image_dir`.

    Exposes `next_batch` (a get_next op from a one-shot iterator) yielding
    (image_batch, one_hot_label_batch).
    """
    def __init__(self, image_dir, image_ext='*.png', c_dim=None, batch_size=None,
                 shuffle_data=True, buffer_size=1, drop_remainder=True, repeat_data=True,
                 a_min=None, a_max=None, verbose=False):
        """
        Parameters beyond BaseImageProvider:
        batch_size : samples per batch; defaults to the full dataset
        shuffle_data / buffer_size : shuffle with the given buffer
        drop_remainder : must be True (only supported mode)
        repeat_data : repeat the dataset indefinitely
        """
        super().__init__(image_dir, image_ext, c_dim, a_min, a_max, verbose)
        self.shuffle_data = shuffle_data
        self.repeat_data = repeat_data
        self.buffer_size = buffer_size
        self.drop_remainder = drop_remainder
        # convert to constants for tf
        self.filenames = tf.constant(self._image_list)
        self.labels = tf.constant(self._label_list)
        # create dataset
        self.data = tf.data.Dataset.from_tensor_slices((self.filenames, self.labels))
        # map image in the dataset
        self.data = self.data.map(self._load_image_func)
        # resolve the batch size BEFORE validating against it -- previously
        # `len(...) < batch_size` ran first and raised TypeError whenever
        # batch_size was left as None
        if batch_size is not None:
            self.batch_size = batch_size
        else:
            self.batch_size = len(self._image_list)  # full batch
            print('Warning: no batch size given, using full batch')
        # process options to the dataset object
        if len(self._label_list) < self.batch_size:
            raise Exception("dataset size is less than batch_size")
        if not self.drop_remainder:
            raise Exception("only supporting drop remainder at present")
        self.data = self.data.batch(self.batch_size, drop_remainder=drop_remainder)
        self.n_batches = self.n_images // self.batch_size
        # NOTE(review): shuffling after batch() shuffles whole batches, not
        # individual samples; confirm that is the intent
        if self.shuffle_data:
            self.data = self.data.shuffle(self.buffer_size)
        if self.repeat_data:
            self.data = self.data.repeat()
        # create iterator and final input tensors
        self.iterator = self.data.make_one_shot_iterator()
        # self.image_batch, self.label_batch = self.iterator.get_next()
        self.next_batch = self.iterator.get_next()
        # self.data = self.data.prefetch(1)
        self.data_shape = [self.batch_size, self.h_dim, self.w_dim, self.c_dim]

    def _load_image_func(self, filename, label):
        """
        Load-image function used to map the filename dataset to tensors.
        """
        image_string = tf.read_file(filename)
        # decode using jpeg (also handles png in tf's decode pipeline)
        image_decoded = tf.image.decode_jpeg(image_string, channels=self.c_dim)
        # This will convert to float values in [0, 1]
        image = tf.image.convert_image_dtype(image_decoded, tf.float32)
        # image = tf.cast(image_decoded, tf.float32) # same as above?
        # make the label vector a one-hot
        one_hot_label = tf.one_hot(label, self.n_categories)
        return image, one_hot_label
|
amoodie/StratGAN | process_images/nsplit_process.py | <filename>process_images/nsplit_process.py
from scipy import misc, ndimage
import matplotlib.pyplot as plt
from skimage import exposure
import numpy as np
import os
def rgb2gray(rgb):
    """Collapse an RGB(A) image to grayscale luminance.

    Uses Rec.709-style weights on the first three channels; any alpha
    channel is ignored. Returns an array with the channel axis removed.
    """
    weights = np.array([0.2125, 0.7154, 0.0721])
    return rgb[..., :3] @ weights
# Crop each raw flume image, grayscale it, and tile it into a grid of
# nhslice x nvslice pngs for training.
# path of directory and list of raw images in directory
dirpath = './raw_images'
rawimgs = [f for f in os.listdir(dirpath) if os.path.isfile(os.path.join(dirpath, f))]
nhslice = 10 # number of slices across deposit
nvslice = 3 # number of slices in vertical
ntslice = nhslice * nvslice # total number of slicer per image
cropdims = np.array([500, 800]) # how much to cut off top and bottom
# for i in enumerate(rawimgs):
for i in enumerate(rawimgs):
    print(i[1])
    # load image
    # NOTE(review): scipy.misc.imread/imsave were removed in SciPy >= 1.2;
    # this script needs an old SciPy (or porting to imageio/PIL)
    raw = misc.imread(os.path.join(dirpath, i[1]))
    # extract shape and use to crop image down
    lx, ly, lz = raw.shape
    crop = raw[cropdims[0] : lx-cropdims[1], :, :]
    # convert to grayscale
    gray = rgb2gray(crop)
    # binarize
    thresh = [180]
    bw = (gray > thresh)
    # dilate, erode, etc
    # NOTE(review): np.int was removed in NumPy 1.24; use builtin int there
    ero_p = ndimage.binary_opening(1.*np.invert(bw), structure=np.ones((3,3))).astype(np.int)
    ero = np.invert(ero_p)
    # NOTE(review): `ero` is immediately overwritten with the grayscale
    # image, so the binarize/open/invert steps above are dead code -- confirm
    ero = gray
    # ero = exposure.equalize_hist(ero)
    img_adapteq = exposure.equalize_adapthist(ero, clip_limit=0.03)
    # ero = numpy.percentile(a, q, axis=None)
    # split the image into columns to loop through
    hsplt = np.array_split(ero, nhslice, 1)
    hsplt = [x for x in hsplt if x.size > 0]
    for j in enumerate(hsplt):
        # split the hsplt array into vertical chunks
        vsplt = np.array_split(hsplt[j[0]], nvslice, 0)
        vsplt = [x for x in vsplt if x.size > 0]
        for k in enumerate(vsplt):
            # sequential output label for the tile
            # NOTE(review): j[0]*nhslice looks like it should be j[0]*nvslice
            # for unique, gap-free labels -- confirm intent
            lab = (i[0]*ntslice + j[0]*nhslice + k[0])
            misc.imsave('./cut_images/%06d.png' % lab, vsplt[k[0]])
# plt.imshow(hsplt[0], cmap='gray')
# plt.show(block=True)
# plt.imshow(crop)
# plt.show(block=False)
|
amoodie/StratGAN | process_images/nrand_process.py | <reponame>amoodie/StratGAN
from scipy import misc, ndimage
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# from skimage import exposure
import numpy as np
import os
# fixed seed so the random cut locations are reproducible between runs
np.random.seed(seed=21548)
# save all figures with transparent backgrounds
plt.rcParams['savefig.transparent'] = True
def rgb2gray(rgb):
    """Convert an RGB(A) image to grayscale via a weighted channel sum.

    Weights are Rec.709-style luminance coefficients; a 4th (alpha)
    channel, if present, is ignored.
    """
    red = rgb[..., 0]
    green = rgb[..., 1]
    blue = rgb[..., 2]
    return 0.2125 * red + 0.7154 * green + 0.0721 * blue
def cut(image, coord, dim):
    """Return a dim x dim square cut from `image`.

    `coord` is the (row, col) of the cut's upper-left corner. Works for
    both 2-D (grayscale) and 3-D (multi-channel) arrays; channels are
    kept intact.
    """
    row0, col0 = coord[0], coord[1]
    rows = slice(row0, row0 + dim)
    cols = slice(col0, col0 + dim)
    if image.ndim > 2:
        return image[rows, cols, :]
    return image[rows, cols]
def group_plot(samples):
    """Tile `samples` into a near-square grid of axes and return the figure.

    Parameters
    ----------
    samples : sequence of 2-D arrays/images to display

    Returns
    -------
    fig : the matplotlib figure (caller is responsible for saving/closing)
    """
    fig = plt.figure(figsize=(4, 4))
    # builtin int() -- .astype(np.int) breaks on NumPy >= 1.24 (alias removed)
    gd = int(np.ceil(np.sqrt(len(samples))))
    gs = gridspec.GridSpec(gd, gd)
    gs.update(wspace=0.05, hspace=0.05)
    for i, sample in enumerate(samples):
        ax = plt.subplot(gs[i])
        plt.axis('off')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_aspect('equal')
        plt.imshow(sample, cmap='Greys_r')
    return fig
# For each cropped flume slice: clean/binarize it, save diagnostic figures,
# then pull n_cuts random square tiles for training.
# path of directory and list of raw images in directory
dir_path = './cropped_slices'
cropped_slices = [f for f in os.listdir(dir_path) if os.path.isfile(os.path.join(dir_path, f))]
cropped_slices = sorted(cropped_slices)
n_cuts = 0 # number of cut images to pull out of each image
cut_dim = 64 # 28 # pixels size of WxH for cut images
for i, cropped_slice in enumerate(cropped_slices):
    line_name = cropped_slice.rstrip('.jpg')
    print('operating on: ', line_name)
    # load image
    # NOTE(review): scipy.misc.imread/imsave require SciPy < 1.2
    raw = misc.imread(os.path.join(dir_path, cropped_slice))
    # extract shape and use to crop image down
    lx, ly, lz = raw.shape # switch x and y
    # crop = raw[cropdims[0] : lx-cropdims[1], :, :]
    # valid range of upper-left corners so cuts stay inside the image
    rx, ry = np.array([lx, ly]) - (cut_dim)
    steps_idx = np.array([np.random.randint(0, rx), np.random.randint(0, ry)])
    # convert to grayscale
    gray = rgb2gray(raw)
    # binarize
    thresh = 110 # 255 * 0.9
    # NOTE(review): np.bool / np.int aliases were removed in NumPy 1.24;
    # use builtin bool/int on modern NumPy
    bw = np.array((gray > thresh)).astype(np.bool)
    # dilate, erode, etc: close then open to despeckle the binary image
    dil = ndimage.binary_closing((bw), structure=np.ones((3,3)))
    ero = ndimage.binary_opening(dil, structure=np.ones((3,3)))
    clean = ero
    # diagnostic figures: full line and a fixed crop
    plt.imshow(clean, cmap='gray')
    plt.savefig('out/{0}line_full.png'.format(i), bbox_inches='tight', dpi=600)
    plt.close()
    plt.imshow(clean[:, 6000:8000], cmap='gray')
    plt.savefig('out/{0}line_cropped.png'.format(i), bbox_inches='tight', dpi=300)
    plt.close()
    # one example tile at each processing stage
    steps = [cut(raw, steps_idx, cut_dim), cut(gray, steps_idx, cut_dim), cut(bw, steps_idx, cut_dim), \
             cut(dil, steps_idx, cut_dim), cut(ero, steps_idx, cut_dim)]
    steps_fig = group_plot(steps)
    plt.savefig('out/{0}line_steps_fig.png'.format(i), bbox_inches='tight')
    plt.close(steps_fig)
    # plt.hist(cut(clean, steps_idx, cut_dim).flatten())
    # plt.savefig('out/steps_hist.png', bbox_inches='tight')
    # plt.close()
    for j in np.arange(n_cuts):
        saved = False
        while not saved:
            rand_idx = np.array([np.random.randint(0, rx), np.random.randint(0, ry)])
            rand_cut = cut(clean, rand_idx, cut_dim)
            # perc_blk = np.count_nonzero(np.invert(rand_cut)) / rand_cut.size
            # NOTE(review): the computed black fraction is commented out and
            # hard-coded to 0.5, so the accept/reject test always accepts
            perc_blk = 0.5
            if perc_blk < 0.05 or perc_blk > 0.95:
                saved = False
            else:
                lab = '{0}_{1}.png'.format(i, '%04d'%j) # label idx for one hot vector, jth image
                misc.imsave(os.path.join('cut_images', lab), rand_cut.astype(np.int))
                saved = True
            # lab = '{0}_{1}.png'.format(i, '%04d'%j) # label idx for one hot vector, jth image
            # misc.imsave(os.path.join('cut_images', lab), rand_cut.astype(np.int))
            # saved = True
        if j % 100 == 0:
            print('cutting image {0} of {1}'.format(j+1, n_cuts))
    # NOTE(review): `lab`/`rand_cut` are unbound here when n_cuts == 0 (as
    # currently set), so this line raises NameError if reached -- confirm
    misc.imsave(os.path.join('cut_images_demo', lab), rand_cut.astype(np.int))
|
amoodie/StratGAN | StratGAN/groundtruth.py | <reponame>amoodie/StratGAN
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import sys
import numpy as np
from random import randint
import tensorflow as tf
import os
import abc
class GroundTruth(object):
    """
    Base class for ground-truth conditioning data quilted into the painter
    canvas. Holds a NaN-initialized canvas the same size as the painter's
    (NaN marks "no ground truth here") plus an RGBA overlay for plotting.
    """
    def __init__(self, pconfig, painter_canvas,
                 overlay_alpha=0.8):
        """
        Parameters
        ----------
        pconfig : painting configuration (new/load/save flags, type,
            output directory)
        painter_canvas : the painter's canvas array; only its shape is used
        overlay_alpha : opacity of the plotted overlay where ground truth
            exists
        """
        # (removed dead `__metaclass__ = abc.ABCMeta` assignment: as a local
        #  inside __init__ it had no effect in Python 2 or 3; subclassing
        #  abc.ABC at the class level would be the Python-3 equivalent)
        self.pconfig = pconfig
        self.canvas_height = painter_canvas.shape[0]
        self.canvas_width = painter_canvas.shape[1]
        # NaN everywhere == no ground truth placed yet
        self.canvas = np.empty((self.canvas_height, self.canvas_width))
        self.canvas.fill(np.nan)
        self.canvas_overlay = np.zeros((self.canvas_height, self.canvas_width, 4))
        self.overlay_alpha = overlay_alpha
        self.type = None
        self.out_data_dir = self.pconfig.out_data_dir
        self.pconfig_to_groundtruth_source()

    def pconfig_to_groundtruth_source(self):
        """Validate the new/load flags and resolve the ground-truth source.

        Exactly one of pconfig.groundtruth_new / pconfig.groundtruth_load
        must be set; raises RuntimeError otherwise.
        """
        if not self.pconfig.groundtruth_new and not self.pconfig.groundtruth_load:
            raise RuntimeError('must specify either new or source for groundtruth')
        if self.pconfig.groundtruth_new and self.pconfig.groundtruth_load:
            raise RuntimeError('must not specify both new and source for groundtruth')
        if self.pconfig.groundtruth_new:
            self.groundtruth_new = True
            self.groundtruth_source = self.pconfig.groundtruth_type
        elif self.pconfig.groundtruth_load:
            self.groundtruth_new = False
            self.groundtruth_source = self.pconfig.groundtruth_load
        self.groundtruth_save = self.pconfig.groundtruth_save

    def make_overlay(self):
        """Fill self.canvas_overlay (RGBA) from self.canvas.

        Finite cells equal to 0.0 ("channel") get one blue-gray tone,
        other finite cells ("mud") a lighter tone; NaN cells remain
        transparent.
        """
        gt_idx = np.isfinite(self.canvas)
        channel_idx = self.canvas == 0.0
        self.canvas_overlay[np.logical_and(gt_idx, channel_idx), 0] = 61/255     # R channel, channel
        self.canvas_overlay[np.logical_and(gt_idx, np.invert(channel_idx)), 0] = 177/255  # R channel, mud
        self.canvas_overlay[np.logical_and(gt_idx, channel_idx), 1] = 116/255    # G channel
        self.canvas_overlay[np.logical_and(gt_idx, np.invert(channel_idx)), 1] = 196/255  # G channel
        self.canvas_overlay[np.logical_and(gt_idx, channel_idx), 2] = 178/255    # B channel
        self.canvas_overlay[np.logical_and(gt_idx, np.invert(channel_idx)), 2] = 231/255  # B channel
        self.canvas_overlay[gt_idx, 3] = 1 * self.overlay_alpha
class GroundTruthCores(GroundTruth):
    """
    GroundTruth object for adding vertical cores to the painter.

    A core is a narrow vertical strip of the canvas where the facies are
    known; values written into ``self.canvas`` are 0 (channel block) or 1
    (background), while untouched cells remain nan.
    """
    def __init__(self, pconfig, painter_canvas,
                 core_width=10,
                 n_cores=None):
        # core_width: width of each core strip in pixels
        # n_cores: number of cores to generate (defaults to 1 when generating new)
        GroundTruth.__init__(self, pconfig=pconfig, painter_canvas=painter_canvas)
        self.type = 'core'

        # generate any cores if needed, and quilt them into canvas
        # self.core_source = self.pconfig.core_source
        self.core_width = core_width

        # generate the cores by the appropriate flag
        if self.groundtruth_new:
            # if self.groundtruth_source == 'block':
            if True:  # NOTE(review): the source-type check is disabled -- 'block' is the only builder implemented
                block_height = 24
                if not n_cores:
                    self.n_cores = 1
                else:
                    self.n_cores = n_cores
                self.initialize_block_cores(n_cores=self.n_cores,
                                            n_blocks=2, block_height=block_height)
                self.meta = {'n_cores': self.n_cores}
            else:
                raise ValueError('bad core builder string given')
        else:
            # load a previously saved canvas and its metadata from disk
            print('loading core file from: ', self.groundtruth_source)
            canvas = np.load(os.path.join(self.out_data_dir, self.groundtruth_source)+'_groundtruth_canvas.npy')
            meta = np.load(os.path.join(self.out_data_dir, self.groundtruth_source)+'_groundtruth_meta.npy', allow_pickle=True)
            self.canvas = canvas
            # meta was saved as a 0-d object array wrapping a dict; unwrap it
            self.meta = meta.flat[0]
            self.n_cores = self.meta['n_cores']

        # save it out (sometimes just overwrites what just got loaded)
        if self.groundtruth_save:
            np.save(os.path.join(self.out_data_dir, self.groundtruth_save+'_groundtruth_canvas.npy'), self.canvas)
            np.save(os.path.join(self.out_data_dir, self.groundtruth_save+'_groundtruth_meta.npy'), self.meta)

        self.make_overlay()

    def initialize_block_cores(self, n_cores=2, n_blocks=3, block_height=10):
        """Generate ``n_cores`` synthetic cores, each with ``n_blocks`` channel blocks.

        Blocks are placed at random heights (and may overlap each other);
        each finished core is written into ``self.canvas`` at a random
        horizontal position.
        """
        # make cores with n_blocks channel body segments. They are randomly
        # placed into the column, and may be overlapping.
        #
        # preallocate cores array, pages are cores
        core_loc = np.zeros((n_cores)).astype(np.int)
        core_val = np.zeros((self.canvas_height, self.core_width, self.n_cores))

        # make the core_val for each in n_cores
        for i in np.arange(n_cores):
            # preallocate a core matrix (1 = background)
            core = np.ones([self.canvas_height, self.core_width])
            # generate a random x-coordinate for top-left core corner
            ul_coord = np.random.randint(low=0, high=self.canvas_width-self.core_width, size=1)
            for j in np.arange(n_blocks):
                # generate a random y-coordinate for the top of the block
                y_coord = np.random.randint(low=0, high=self.canvas_height-block_height, size=1)[0]
                core[y_coord:y_coord+block_height, :] = 0  # 0 marks the channel block
            # store the core into a multi-core matrix
            core_val[:, :, i] = core
            core_loc[i] = ul_coord

        # quilt each core into the canvas at its column location
        for i in np.arange(n_cores):
            self.canvas[:, core_loc[i]:core_loc[i]+self.core_width] = core_val[:,:,i]
|
amoodie/StratGAN | StratGAN/ops.py | import tensorflow as tf
def xavier_init(size):
    """Sample initial weights ~ N(0, sqrt(2 / fan_in)) (Xavier-style fan-in scaling)."""
    fan_in = size[0]
    stddev = 1. / tf.sqrt(fan_in / 2.)
    return tf.random_normal(shape=size, stddev=stddev)
def scewl(logits, labels):
    """
    Shorthand wrapper for shorter call sites.
    wraps: tf.nn.sigmoid_cross_entropy_with_logits()
    """
    loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)
    return loss
def condition_concat(tensors, axis, *args, **kwargs):
    """Thin wrapper around tf.concat so conditioning concats share one entry point."""
    return tf.concat(tensors, axis, *args, **kwargs)
def condition_conv_concat(tensors, axis=3, name='conv_concat'):
    '''
    Concatenate a conditioning vector onto the feature-map axis of x.

    Args:
        tensors: [x, y] where x is the feature map (4-D, batch first) and y
            is the conditioning tensor to be broadcast over the spatial dims.
        axis: axis to concatenate on (default 3, the NHWC channel axis).
        name: name passed through to the concat op.

    Raises:
        ValueError: if more than two tensors are supplied.
    '''
    if len(tensors) > 2:
        # bugfix: the ValueError was previously constructed but never raised,
        # so extra tensors were silently ignored
        raise ValueError('more than 2 tensors in tensors. Only images and labels allowed.')
    x = tensors[0]
    y = tensors[1]
    x_shapes = x.get_shape()
    y_shapes = y.get_shape()
    if not axis:
        # NOTE(review): this treats axis=0 the same as "unset" and yields a
        # tensor-valued axis -- appears unused in practice; confirm before relying on it
        axis = tf.rank(x) + 1
    # need to create a tensor with unknown (dynamic) batch size, little hacky here:
    # a ones tensor broadcasts y across the spatial dimensions of x
    ones_dims = tf.stack([tf.shape(x)[0], x_shapes[1], x_shapes[2], y_shapes[3]])
    ones_tensor = tf.fill(ones_dims, 1.0)
    # bugfix: previously concatenated on a hard-coded axis=3, ignoring the
    # ``axis`` argument (identical for the default call, fixed otherwise)
    return condition_concat([x, y*ones_tensor],
                            axis=axis, name=name)
def conv2d_layer(_input, output_size, is_training=None,
                 k_h=5, k_w=5, d_h=2, d_w=2, scope=None,
                 bias0=0.0, batch_norm=False, return_w=False):
    """Strided 2D convolution layer with optional batch normalization.

    Args:
        _input: 4-D input tensor (batch first; channels-last indexing is used
            for the kernel shape).
        output_size: number of output filters.
        is_training: boolean tensor selecting batch-norm mode via tf.cond;
            required whenever batch_norm=True.
        k_h, k_w: kernel height / width.
        d_h, d_w: stride height / width.
        scope: variable scope name (default 'conv2d').
        bias0: initial bias value.
        batch_norm: if True, apply batch normalization with population
            statistics tracked by an exponential moving average (decay 0.95).
        return_w: if True, also return the weight and bias variables.

    Returns:
        Output tensor h, or (h, w, b) when return_w is True.
    """
    _in_shape = _input.get_shape().as_list()
    with tf.variable_scope(scope or 'conv2d'):
        w = tf.get_variable("weights", [k_h, k_w, _in_shape[-1], output_size], tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer())
        c = tf.nn.conv2d(_input, w, strides=[1, d_h, d_w, 1], padding='SAME')
        b = tf.get_variable("bias", [output_size],
                            initializer=tf.constant_initializer(bias0))
        m = tf.nn.bias_add(c, b)
        # KILLED THIS RESHAPE -- c and m had the same shape so is it needed??
        # conv = tf.reshape(m, c.get_shape())
        conv = m

        if batch_norm:
            # learned scale (gamma) and shift (beta)
            bn_scale = tf.get_variable("bn_scale", output_size, tf.float32,
                                       initializer=tf.constant_initializer(1.0))
            bn_beta = tf.get_variable("bn_beta", output_size, tf.float32,
                                      initializer=tf.constant_initializer(0.0))
            # population statistics used at inference time
            pop_mean = tf.Variable(tf.zeros(output_size),
                                   trainable=False)
            pop_var = tf.Variable(tf.ones(output_size),
                                  trainable=False)

            def training_true():
                # training branch: normalize with batch statistics and update
                # the population moving averages as a side effect
                decay = 0.95
                bn_mean, bn_var = tf.nn.moments(conv, axes=[0, 1, 2], keep_dims=False)
                train_mean = tf.assign(pop_mean,
                                       pop_mean * decay + bn_mean * (1 - decay))
                train_var = tf.assign(pop_var,
                                      pop_var * decay + bn_var * (1 - decay))
                with tf.control_dependencies([train_mean, train_var]):
                    bn_conv = tf.nn.batch_normalization(conv, bn_mean, bn_var,
                                                        bn_beta, bn_scale, 1e-5)
                return bn_conv

            def training_false():
                # inference branch: normalize with the tracked population stats
                bn_conv = tf.nn.batch_normalization(conv, pop_mean, pop_var,
                                                    bn_beta, bn_scale, 1e-5)
                return bn_conv

            bn_conv = tf.cond(is_training, true_fn=training_true, false_fn=training_false)
            h = bn_conv
        else:
            h = conv

        if return_w:
            return h, w, b
        else:
            return h
def conv2dT_layer(_input, output_size, is_training=None,
                  k_h=5, k_w=5, d_h=2, d_w=2, scope=None,
                  bias0=0.0, batch_norm=False, return_w=False):
    """Strided transposed (fractionally-strided) 2D convolution with optional batch norm.

    Args:
        _input: 4-D input tensor.
        output_size: full output shape list [batch, height, width, channels]
            (unlike conv2d_layer, where output_size is just the filter count).
        is_training: boolean tensor selecting batch-norm mode via tf.cond;
            required whenever batch_norm=True.
        k_h, k_w: kernel height / width.
        d_h, d_w: stride height / width.
        scope: variable scope name (default 'deconv2d').
        bias0: initial bias value.
        batch_norm: if True, apply batch normalization with population
            statistics tracked by an exponential moving average (decay 0.95).
        return_w: if True, also return the weight and bias variables.

    Returns:
        Output tensor h, or (h, w, b) when return_w is True.
    """
    _in_shape = _input.get_shape().as_list()
    with tf.variable_scope(scope or 'deconv2d'):
        w = tf.get_variable("weights", [k_h, k_w, output_size[-1], _in_shape[-1]], tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer())
        c = tf.nn.conv2d_transpose(_input, w, output_shape=output_size,
                                   strides=[1, d_h, d_w, 1])
        b = tf.get_variable("bias", [output_size[-1]],
                            initializer=tf.constant_initializer(bias0))
        m = tf.nn.bias_add(c, b)
        # KILLED THIS RESHAPE -- c and m had the same shape so is it needed??
        # convT = tf.reshape(m, c.get_shape())
        convT = m

        if batch_norm:
            # normalize over per-element statistics (full H x W x C shape)
            norm_shape = [output_size[1], output_size[2], output_size[3]]
            bn_scale = tf.get_variable("bn_scale", norm_shape, tf.float32,
                                       initializer=tf.constant_initializer(1.0))
            bn_beta = tf.get_variable("bn_beta", norm_shape, tf.float32,
                                      initializer=tf.constant_initializer(0.0))
            # population statistics used at inference time
            pop_mean = tf.Variable(tf.zeros(norm_shape),
                                   trainable=False)
            pop_var = tf.Variable(tf.ones(norm_shape),
                                  trainable=False)

            def training_true():
                # training branch: batch statistics + moving-average updates
                decay = 0.95
                bn_mean, bn_var = tf.nn.moments(convT, axes=[0, 1, 2], keep_dims=False)
                train_mean = tf.assign(pop_mean,
                                       pop_mean * decay + bn_mean * (1 - decay))
                train_var = tf.assign(pop_var,
                                      pop_var * decay + bn_var * (1 - decay))
                with tf.control_dependencies([train_mean, train_var]):
                    bn_convT = tf.nn.batch_normalization(convT, bn_mean, bn_var,
                                                         bn_beta, bn_scale, 1e-5)
                return bn_convT

            def training_false():
                # inference branch: tracked population statistics
                bn_convT = tf.nn.batch_normalization(convT, pop_mean, pop_var,
                                                     bn_beta, bn_scale, 1e-5)
                return bn_convT

            bn_convT = tf.cond(is_training, true_fn=training_true, false_fn=training_false)
            h = bn_convT
        else:
            h = convT

        if return_w:
            return h, w, b
        else:
            return h
def linear_layer(_input, output_size, is_training=None, scope=None,
                 stddev=0.02, bias0=0.0,
                 batch_norm=False, return_w=False):
    """Fully-connected (dense) layer with optional batch normalization.

    Args:
        _input: 2-D input tensor [batch, features].
        output_size: number of output units.
        is_training: boolean tensor selecting batch-norm mode via tf.cond;
            required whenever batch_norm=True.
        scope: variable scope name (default 'linear').
        stddev: NOTE(review): currently unused -- weights come from the
            xavier initializer below.
        bias0: initial bias value.
        batch_norm: if True, apply batch normalization with population
            statistics tracked by an exponential moving average (decay 0.95).
        return_w: if True, also return the weight and bias variables.

    Returns:
        Output tensor h, or (h, w, b) when return_w is True.
    """
    _in_shape = _input.get_shape().as_list()
    # if batch_norm:
    #     tf.cond(is_training,
    #             true_fn=print(''),
    #             false_fn=RuntimeError('If batchnorm, is_training MUST be passed in feeddict'))
    # print(tf.shape(_input)[1])
    with tf.variable_scope(scope or 'linear'):
        w = tf.get_variable("weights", [_in_shape[1], output_size], tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer())
        b = tf.get_variable("bias", [output_size],
                            initializer=tf.constant_initializer(bias0))
        mm = tf.matmul(_input, w) + b

        if batch_norm:
            # learned scale (gamma) and shift (beta)
            bn_scale = tf.get_variable("bn_scale", output_size, tf.float32,
                                       initializer=tf.constant_initializer(1.0))
            bn_beta = tf.get_variable("bn_beta", output_size, tf.float32,
                                      initializer=tf.constant_initializer(0.0))
            # population statistics used at inference time
            pop_mean = tf.Variable(tf.zeros(output_size),
                                   trainable=False)
            pop_var = tf.Variable(tf.ones(output_size),
                                  trainable=False)

            def training_true():
                # training branch: batch statistics + moving-average updates
                decay = 0.95
                bn_mean, bn_var = tf.nn.moments(mm, 0, keep_dims=False)
                train_mean = tf.assign(pop_mean,
                                       pop_mean * decay + bn_mean * (1 - decay))
                train_var = tf.assign(pop_var,
                                      pop_var * decay + bn_var * (1 - decay))
                with tf.control_dependencies([train_mean, train_var]):
                    bn_mm = tf.nn.batch_normalization(mm, bn_mean, bn_var,
                                                      bn_beta, bn_scale, 1e-5)
                return bn_mm

            def training_false():
                # inference branch: tracked population statistics
                bn_mm = tf.nn.batch_normalization(mm, pop_mean, pop_var,
                                                  bn_beta, bn_scale, 1e-5)
                return bn_mm

            bn_mm = tf.cond(is_training, true_fn=training_true, false_fn=training_false)
            h = bn_mm
        else:
            h = mm

        if return_w:
            return h, w, b
        else:
            return h
def minibatch_discriminator_layer(_input, num_kernels=5, kernel_dim=3, scope='minibatch_discrim'):
    """
    minibatch discrimination to prevent modal collapse:
    https://github.com/AYLIEN/gan-intro/blob/master/gan.py

    Projects each sample to num_kernels x kernel_dim, computes L1 distances
    between every pair of samples in the batch, and appends the resulting
    per-sample similarity features to the input.
    """
    # x = linear(_input, num_kernels * kernel_dim, scope='minibatch', stddev=0.02)
    with tf.variable_scope(scope or 'minibatch_discrim'):
        x = linear_layer(_input, num_kernels * kernel_dim, is_training=False, scope='linear', batch_norm=False)
        activation = tf.reshape(x, (-1, num_kernels, kernel_dim))
        # pairwise differences between every pair of batch samples
        diffs = tf.expand_dims(activation, 3) - \
            tf.expand_dims(tf.transpose(activation, [1, 2, 0]), 0)
        abs_diffs = tf.reduce_sum(tf.abs(diffs), 2)
        # negative-exponential of the L1 distances -> similarity features
        minibatch_features = tf.reduce_sum(tf.exp(-abs_diffs), 2)
        return tf.concat([_input, minibatch_features], 1)
amoodie/StratGAN | StratGAN/main.py | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
from utils import Config, mkdirs, rand_id
from model import StratGAN
"""
### MINIREADME ###
options for directories:
['multi_line','multi_line_bw','shapes_all','shapes_all_mini','shapes_star','shapes_circle']
must be specified as below ../data/
"""
# Setup configuration
# -----------
flags = tf.app.flags
# general flags
flags.DEFINE_string("run_dir", None, "Directory run name to save/load samp, log, chkp under. If none, auto select [None]")
flags.DEFINE_integer("gf_dim", 64, "Number of filters in generator [64]")
flags.DEFINE_integer("df_dim", 64, "Number of filters in discriminator [64]")
# training related flags
flags.DEFINE_boolean("train", False, "True for training [False]")
flags.DEFINE_integer("epoch", 5, "Epoch to train [5]")
flags.DEFINE_float("learning_rate", 0.0005, "Learning rate of for adam [0.0005]")
flags.DEFINE_float("beta1", 0.6, "Momentum term of adam [0.6]")
flags.DEFINE_integer("batch_size", 64, "Size of batch images [64]")
flags.DEFINE_integer("gener_iter", 2, "Number of times to iterate generator per batch [2]")
flags.DEFINE_string("image_dir", "multi_line_bw_128", "Root directory of dataset [multi_line_bw_128]")
# flags.DEFINE_integer("sample_int", 100, "The interval to sample images at during training [100]")
# painting related flags
flags.DEFINE_boolean("paint", False, "True for painting [False]")
flags.DEFINE_integer("paint_label", None, "The label to paint with")
flags.DEFINE_integer("paint_width", 1000, "The size of the paint images to produce. If None, same value as paint_height [1000]")
flags.DEFINE_integer("paint_height", None, "The size of the paint images to produce. If None, value of paint_width/4 [None]")
flags.DEFINE_integer("paint_overlap", 24, "The size of the overlap during painting [24]")
flags.DEFINE_string("paint_patcher", 'context', "Method for getting next matching patch ['context']")
flags.DEFINE_float("paint_overlap_thresh", 10.0, "The threshold L2 norm error for overlapped patch areas [10.0]")
flags.DEFINE_boolean("paint_groundtruth", False, "Whether to use a groundtruth [False]")
flags.DEFINE_string("paint_groundtruth_type", 'core', "Type of groundtruth [core]")
flags.DEFINE_boolean("paint_groundtruth_new", False, "Whether to generate new groundtruth, value is passed to whatever groundtruth specified [False]")
flags.DEFINE_string("paint_groundtruth_load", None, "String specifying where to load groundtruth canvas and meta from ['None']")
flags.DEFINE_string("paint_groundtruth_save", None, "String specifying the name to save groundtruth [None]")
flags.DEFINE_integer("paint_n_cores", 0, "The number of cores to generate in the painting process, [0]")
flags.DEFINE_float("paint_core_thresh", 2.0, "The threshold L2 norm error for overlapped core areas [2.0]")
flags.DEFINE_string("paint_savefile_root", None, "String specifying the filename for saved numpy arrays and images (no file extensions) [None]")
# post sampling related flags
flags.DEFINE_boolean("post", False, "True for post sampling [False]")
# create flag object
FLAGS = flags.FLAGS
# merge flags and fixed configs into config, which gets passed to the StratGAN object
config = Config()
# training data sources
config.image_dir = os.path.join(os.pardir, 'data', FLAGS.image_dir)
config.image_ext = '*.png'
config.img_verbose = True
# model configurations
config.batch_size = FLAGS.batch_size
config.z_dim = 100 # number inputs to gener
config.c_dim = 1
config.gf_dim = FLAGS.gf_dim # number of gener conv filters
config.df_dim = FLAGS.df_dim # number of discim conv filters
config.gfc_dim = 1024 # number of gener fully connecter layer units
config.dfc_dim = 1024 # number of discim fully connected layer units
config.alpha = 0.1 # leaky relu alpha
config.batch_norm = True
config.minibatch_discrim = True
# training hyperparameters
config.epoch = FLAGS.epoch
config.learning_rate = FLAGS.learning_rate # optim learn rate
config.beta1 = FLAGS.beta1 # momentum
config.repeat_data = True
config.shuffle_data = True
config.buffer_size = 4
config.drop_remainder = True # currently fails if false!
config.gener_iter = FLAGS.gener_iter # times to update generator per discriminator update
config.noisy_inputs = False # add some small noise to the input images
config.flip_inputs = False # whether to flip the black white pixels
# i/o structures
config.log_dir = 'log'
config.out_dir = 'out'
config.samp_dir = 'samp'
config.chkp_dir = 'chkp'
config.paint_dir = 'paint'
config.post_dir = 'post'
config.run_dir = FLAGS.run_dir
if not config.run_dir: # if the run dir was not given, make something up
config.run_dir = rand_id()
# painting configurations
pconfig = Config()
pconfig.label = FLAGS.paint_label
pconfig.width = FLAGS.paint_width
pconfig.height = FLAGS.paint_height
pconfig.overlap = FLAGS.paint_overlap
pconfig.patcher = FLAGS.paint_patcher
pconfig.overlap_thresh = FLAGS.paint_overlap_thresh
pconfig.groundtruth = FLAGS.paint_groundtruth
pconfig.groundtruth_type = FLAGS.paint_groundtruth_type
# pconfig.core_source = FLAGS.paint_core_source
pconfig.groundtruth_new = FLAGS.paint_groundtruth_new
pconfig.groundtruth_load = FLAGS.paint_groundtruth_load
pconfig.groundtruth_save = FLAGS.paint_groundtruth_save
pconfig.n_cores = FLAGS.paint_n_cores
pconfig.core_thresh = FLAGS.paint_core_thresh
pconfig.savefile_root = FLAGS.paint_savefile_root
# create folder structure
# -----------
folder_list = [config.out_dir, config.log_dir,
config.samp_dir, config.paint_dir, config.post_dir]
mkdirs(folder_list)
mkdirs([os.path.join(config.out_dir, config.run_dir),
os.path.join(config.log_dir, config.run_dir),
os.path.join(config.samp_dir, config.run_dir),
os.path.join(config.paint_dir, config.run_dir),
os.path.join(config.post_dir, config.run_dir)]) # this should be wrapped in with mkdirs function...
# model execution function
# -----------
def main(_):
    """Entry point invoked by tf.app.run(): train, or load a checkpoint and paint/post-sample."""
    with tf.Session() as sess:
        # build up the model, and initialize the session
        stratgan = StratGAN(sess, config)

        # if train, run the training, which saves the model
        if FLAGS.train:
            stratgan.train()
        # otherwise load a previously saved checkpoint for this run_dir
        else:
            chkp_dir = os.path.join(config.chkp_dir, config.run_dir)
            stratgan.load(chkp_dir)

            # now paint or do other post sampling
            if FLAGS.paint:
                stratgan.paint(pconfig)
            # elif FLAGS.context_paint:
            #     chkp_dir = os.path.join(config.chkp_dir, config.run_dir)
            #     stratgan.load(paint_chkp_dir)
            #     stratgan.context_paint()
            elif FLAGS.post:
                post_chkp_dir = os.path.join(config.chkp_dir, config.run_dir)
                stratgan.load(post_chkp_dir)
                stratgan.post_sampler(linear_interp=0, label_interp=False, random_realizations=True)
            else:
                print('Neither "train", "paint", or "post" selected. Doing nothing.')


if __name__ == '__main__':
    tf.app.run()
|
JeffMacaluso/mlcookbook | mlcookbook/eda.py | <filename>mlcookbook/eda.py
import numpy as np
import pandas as pd
# TODO: Consider adding predict missing values,
#### Outliers
# TODO: Add function to dynamically determine categorical columns
# Check ideas from here https://datascience.stackexchange.com/questions/9892/how-can-i-dynamically-distinguish-between-categorical-data-and-numerical-data
# and here https://stackoverflow.com/questions/35826912/what-is-a-good-heuristic-to-detect-if-a-column-in-a-pandas-dataframe-is-categori
# Printing the percentage of missing values per column
def percent_missing(dataframe):
    '''
    Prints the percentage of missing values for each column in a dataframe.

    Columns with no missing values are not listed; if nothing is missing
    anywhere, a single message is printed instead.
    '''
    # Per-column count of nulls, and the corresponding fraction of rows
    null_counts = dataframe.isnull().values.sum(axis=0)
    null_fractions = null_counts / dataframe.shape[0]

    if not null_counts.sum():
        print('No missing values')
    else:
        # Report each column that has at least one missing value
        print('Percent Missing Values:', '\n')
        for idx, col in enumerate(dataframe.columns):
            if null_counts[idx] > 0:
                print('{0}: {1:.2f}%'.format(col, null_fractions[idx] * 100))
def iqr_indices_of_outliers(X):
    '''
    Detects outliers using the interquartile range (IQR) method:
    anything beyond 1.5 * IQR outside the quartiles is flagged.

    Input: An array of a variable to detect outliers for
    Output: An array with indices of detected outliers

    # TODO: Update this to dynamically accept multiple features at once
    '''
    quartile_1, quartile_3 = np.percentile(X, [25, 75])
    spread = quartile_3 - quartile_1
    # Tukey fences at 1.5x the interquartile range
    lower_fence = quartile_1 - 1.5 * spread
    upper_fence = quartile_3 + 1.5 * spread
    return np.where((X > upper_fence) | (X < lower_fence))
def z_score_indices_of_outliers(X, threshold=3):
    '''
    Detects outliers using the Z score method.

    Input:  - X: An array of a variable to detect outliers for
            - threshold: The number of standard deviations from the mean
              to be considered an outlier
    Output: An array with indices of detected outliers

    # TODO: Update this to dynamically accept multiple features at once
    '''
    X_arr = np.asarray(X)
    # Standardize: distance from the mean in units of standard deviation
    z_scores = (X_arr - X_arr.mean()) / X_arr.std()
    return np.where(np.abs(z_scores) > threshold)
def percentile_indices_of_outliers(X, percentile_threshold=0.1):
    '''
    Determines outliers based off of percentiles.

    Flags values lying in the outer tails of the distribution: with the
    default percentile_threshold=0.1, the lowest 5% and the highest 5%
    of values are reported as outliers.

    Input:  - X: An array of one variable to detect outliers for
            - percentile_threshold: total fraction of the data (split evenly
              between the two tails) to flag as outliers
    Output: An array with indices of detected outliers
    '''
    # bugfix: np.percentile expects percentages on a 0-100 scale, but the
    # previous code passed fractions (e.g. 0.45), so the cutoffs landed near
    # the 0th/100th percentiles regardless of the requested threshold
    tail_pct = percentile_threshold / 2.0 * 100
    minval, maxval = np.percentile(X, [tail_pct, 100 - tail_pct])
    outlier_indices = np.where((X < minval) | (X > maxval))
    return outlier_indices
def ellipses_indices_of_outliers(X, contamination=0.1):
    '''
    Detects outliers using the elliptical envelope method (fits a robust
    Gaussian covariance and flags low-probability points).

    Input:  - X: all variables to detect outliers for
              (NOTE(review): uses X.iloc below, so a pandas DataFrame is
              expected despite "array" -- confirm with callers)
            - contamination: expected proportion of outliers in the data,
              passed straight to sklearn's EllipticEnvelope
    Output: An array with indices of detected outliers
    '''
    from sklearn.covariance import EllipticEnvelope

    # Copying to prevent changes to the input array
    X = X.copy()

    # Dropping categorical columns (heuristic: <= 30 unique values)
    non_categorical = []
    for feature in range(X.shape[1]):
        num_unique_values = len(np.unique(X.iloc[:, feature]))
        if num_unique_values > 30:
            non_categorical.append(feature)
    X = X.iloc[:, non_categorical]  # Subsetting to columns without categorical indexes

    # Testing if there are an adequate number of features
    # (the covariance estimate needs n_samples >> n_features)
    if X.shape[0] < X.shape[1] ** 2.:
        print('Will not perform well. Reduce the dimensionality and try again.')
        return

    # Creating and fitting the detector
    outlier_detector = EllipticEnvelope(contamination=contamination)
    outlier_detector.fit(X)

    # Predicting outliers and outputting an array with 1 if it is an outlier
    outliers = outlier_detector.predict(X)
    outlier_indices = np.where(outliers == -1)
    return outlier_indices
def isolation_forest_indices_of_outliers(X, contamination=0.1, n_estimators=100):
    '''
    Detects outliers using the isolation forest method.

    Input:  - X: all variables to detect outliers for
              (NOTE(review): uses X.iloc below, so a pandas DataFrame is
              expected despite "array" -- confirm with callers)
            - contamination: expected proportion of outliers in the data
            - n_estimators: number of trees in the forest
    Output: An array with indices of detected outliers
    '''
    from sklearn.ensemble import IsolationForest

    # Copying to prevent changes to the input array
    X = X.copy()

    # Dropping categorical columns (heuristic: <= 30 unique values)
    non_categorical = []
    for feature in range(X.shape[1]):
        num_unique_values = len(np.unique(X.iloc[:, feature]))
        if num_unique_values > 30:
            non_categorical.append(feature)
    X = X.iloc[:, non_categorical]  # Subsetting to columns without categorical indexes

    # Creating and fitting the detector
    outlier_detector = IsolationForest(contamination=contamination, n_estimators=n_estimators,
                                       behaviour='new')  # To prevent warnings
    outlier_detector.fit(X)

    # Predicting outliers and outputting an array with 1 if it is an outlier
    outliers = outlier_detector.predict(X)
    outlier_indices = np.where(outliers == -1)
    return outlier_indices
def one_class_svm_indices_of_outliers(X):
    '''
    Detects outliers using the one class SVM method.

    Input: all variables to detect outliers for
           (NOTE(review): uses X.iloc below, so a pandas DataFrame is
           expected despite "array" -- confirm with callers)
    Output: An array with indices of detected outliers
    '''
    from sklearn.svm import OneClassSVM

    # Copying to prevent changes to the input array
    X = X.copy()

    # Dropping categorical columns (heuristic: <= 30 unique values)
    non_categorical = []
    for feature in range(X.shape[1]):
        num_unique_values = len(np.unique(X.iloc[:, feature]))
        if num_unique_values > 30:
            non_categorical.append(feature)
    X = X.iloc[:, non_categorical]  # Subsetting to columns without categorical indexes

    # Testing if there are an adequate number of features
    if X.shape[0] < X.shape[1] ** 2.:
        print('Will not perform well. Reduce the dimensionality and try again.')
        return

    # Creating and fitting the detector
    outlier_detector = OneClassSVM()
    outlier_detector.fit(X)

    # Predicting outliers and outputting an array with 1 if it is an outlier
    outliers = outlier_detector.predict(X)
    outlier_indices = np.where(outliers == -1)
    return outlier_indices
def outlier_report(dataframe, z_threshold=3, per_threshold=0.95, contamination=0.1, n_trees=100):
    '''
    Runs every outlier detector in this module over a dataframe, prints a
    summary table of counts, and returns the detected indices.

    Inputs:
        - dataframe: a pandas DataFrame (anything coercible is converted)
        - z_threshold: standard-deviation cutoff for the Z score method
        - per_threshold: reserved for the percentile method
          (NOTE(review): not forwarded -- its 0.95 default disagrees with
          percentile_indices_of_outliers' 0.1 default, so forwarding would
          silently change results; resolve the intended scale first)
        - contamination: expected outlier fraction for the envelope/forest methods
        - n_trees: number of estimators for the isolation forest
    Output:
        A dict mapping method name to outlier indices (per-feature dicts for
        the single-feature methods).
    '''
    # Converting to a pandas dataframe if it is an array
    # (bugfix: this previously compared type(dataframe) to a *string*, which
    # is never equal, so every input -- including DataFrames -- was re-wrapped)
    if not isinstance(dataframe, pd.DataFrame):
        try:
            dataframe = pd.DataFrame(dataframe)
        except Exception:
            return 'Must be either a dataframe or a numpy array'

    # Creating a copy to avoid fidelity issues
    dataframe = dataframe.copy()

    # Dropping categorical columns: booleans plus anything with few unique values
    dataframe = dataframe.select_dtypes(exclude=['bool_'])
    for column in dataframe.columns:
        num_unique_values = len(dataframe[column].unique())
        if num_unique_values < 30:
            dataframe = dataframe.drop(column, axis=1)

    # Dictionaries for individual features to be packaged into a master dictionary
    iqr_outlier_indices = {}
    z_score_outlier_indices = {}
    percentile_outlier_indices = {}
    multiple_outlier_indices = {}  # Indices with two or more detections

    print('Detecting outliers', '\n')

    # Creating an empty data frame to fill with results
    results = pd.DataFrame(columns=['IQR', 'Z Score', 'Percentile', 'Multiple'])

    # Single column outlier tests
    print('Single feature outlier tests')
    for feature in range(dataframe.shape[1]):
        # Gathering feature names for use in output dictionary and results dataframe
        feature_name = dataframe.columns[feature]

        # Finding outliers
        # (bugfix: z_threshold is now actually forwarded to the detector)
        iqr_outliers = iqr_indices_of_outliers(dataframe.iloc[:, feature])[0]
        z_score_outliers = z_score_indices_of_outliers(dataframe.iloc[:, feature],
                                                       threshold=z_threshold)[0]
        percentile_outliers = percentile_indices_of_outliers(dataframe.iloc[:, feature])[0]
        multiple_outliers = np.intersect1d(iqr_outliers, z_score_outliers)  # TODO: include percentile hits too

        # Adding to the empty dictionaries
        iqr_outlier_indices[feature_name] = iqr_outliers
        z_score_outlier_indices[feature_name] = z_score_outliers
        percentile_outlier_indices[feature_name] = percentile_outliers
        multiple_outlier_indices[feature_name] = multiple_outliers

        # Adding to results dataframe
        outlier_counts = {'IQR': len(iqr_outliers),
                          'Z Score': len(z_score_outliers),
                          'Percentile': len(percentile_outliers),
                          'Multiple': len(multiple_outliers)}
        outlier_counts_series = pd.Series(outlier_counts, name=feature_name)
        results = results.append(outlier_counts_series)

    # Calculating the subtotal of outliers found
    results_subtotal = results.sum()
    results_subtotal.name = 'Total'
    results = results.append(results_subtotal)

    # Calculating the percent of total values in each column
    num_observations = dataframe.shape[0]
    results['IQR %'] = results['IQR'] / num_observations
    results['Z Score %'] = results['Z Score'] / num_observations
    results['Percentile %'] = results['Percentile'] / num_observations
    results['Multiple %'] = results['Multiple'] / num_observations

    # Printing the results dataframe as a table
    print(results, '\n')

    # All column outlier tests
    # (bugfix: contamination and n_trees are now forwarded; their defaults
    # match the detectors' own defaults, so default calls are unchanged)
    print('All feature outlier tests')
    ellipses_envelope_outlier_indices = ellipses_indices_of_outliers(dataframe,
                                                                     contamination=contamination)
    print('- Ellipses Envelope: {0}'.format(len(ellipses_envelope_outlier_indices[0])))
    isolation_forest_outlier_indices = isolation_forest_indices_of_outliers(dataframe,
                                                                            contamination=contamination,
                                                                            n_estimators=n_trees)
    print('- Isolation Forest: {0}'.format(len(isolation_forest_outlier_indices[0])))
    one_class_svm_outlier_indices = one_class_svm_indices_of_outliers(dataframe)
    print('- One Class SVM: {0}'.format(len(one_class_svm_outlier_indices[0])))

    # Putting together the final dictionary for output
    all_outlier_indices = {}
    all_outlier_indices['Ellipses Envelope'] = ellipses_envelope_outlier_indices
    all_outlier_indices['Isolation Forest'] = isolation_forest_outlier_indices
    all_outlier_indices['One Class SVM'] = one_class_svm_outlier_indices
    all_outlier_indices['IQR'] = iqr_outlier_indices
    all_outlier_indices['Z Score'] = z_score_outlier_indices
    all_outlier_indices['Percentile'] = percentile_outlier_indices
    all_outlier_indices['Multiple'] = multiple_outlier_indices

    return all_outlier_indices
JeffMacaluso/mlcookbook | mlcookbook/ml.py | import numpy as np
import pandas as pd
# Random Search
def hyperparameter_random_search(X, y, model=None, parameters=None, num_folds=5, num_iterations=50):
    '''
    Performs a random search over hyperparameters with cross validation and
    reports/returns the fitted search object.

    Inputs:
        - X, y: training features and labels
        - model: an unfitted scikit-learn estimator
        - parameters: dict of parameter distributions for RandomizedSearchCV
        - num_folds: number of cross-validation folds
        - num_iterations: number of random parameter settings to try
    Output: the fitted RandomizedSearchCV object (or None if inputs missing)

    TODO: - Add cross validation method
          - Add status bar
    '''
    # Randomized Search
    from sklearn.model_selection import RandomizedSearchCV
    import datetime

    # Making sure a model or parameters exists
    if model is None:
        print('Please provide a model')
        return
    if parameters is None:
        print('Please provide parameters for the model')
        return

    # Performing randomized search across all cores (n_jobs=-1)
    model = RandomizedSearchCV(model, param_distributions=parameters,
                               n_iter=num_iterations, n_jobs=-1, cv=num_folds,
                               verbose=0)
    print('Beginning random search at {0}'.format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
    model.fit(X, y)
    print('Completed random search at {0}'.format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
    print()

    # Reporting the results
    print('Best Estimator:', model.best_estimator_)
    print('Best Parameters:', model.best_params_)
    print('Best Score:', model.best_score_)

    return model
# TODO: Add grid search
# TODO: Add xgboost probability threshold search
# Probability Threshold Search - scikit-learn
def optimal_probability_cutoff(model, test_dataset, test_labels, max_thresh=0.99, step_size=0.01):
    '''
    Finds the optimal probability cutoff to maximize the F1 score.
    Returns the optimal probability cutoff, F1 score, and a plot of the results.

    Inputs:
        - model: a fitted classifier exposing predict_proba
        - test_dataset, test_labels: held-out features and labels
        - max_thresh: highest cutoff to test
        - step_size: spacing between tested cutoffs

    TODO:
        - Add precision, recall, and accuracy
    '''
    from sklearn import metrics
    import matplotlib.pyplot as plt

    # Prediction probabilities of the test dataset
    # NOTE(review): takes column 1 of predict_proba, i.e. the positive class
    # of a binary problem -- confirm this is intended for the multi-class path
    predicted = model.predict_proba(test_dataset)[:, 1]

    # Creating an empty dataframe to fill with probability cutoff thresholds and f1 scores
    results = pd.DataFrame(columns=['Threshold', 'F1 Score'])

    # Setting f1 score average metric based on binary or multi-class classification
    if len(np.unique(test_labels)) == 2:
        avg = 'binary'
    else:
        avg = 'micro'

    # Looping through different probability thresholds
    for thresh in np.arange(0, (max_thresh+step_size), step_size):
        pred_bin = pd.Series(predicted).apply(lambda x: 1 if x > thresh else 0)
        f1 = metrics.f1_score(test_labels, pred_bin, average=avg)
        tempResults = {'Threshold': thresh, 'F1 Score': f1}
        results = results.append(tempResults, ignore_index=True)

    # Plotting the F1 score throughout different probability thresholds
    plt.figure(figsize=(7, 5))
    results.plot(x='Threshold', y='F1 Score')
    plt.title('F1 Score by Probability Cutoff Threshold')
    plt.ylabel('F1 Score')
    plt.show()

    # Returning the row (threshold + F1) with the highest F1 score
    best_index = list(results['F1 Score']).index(max(results['F1 Score']))
    print('Threshold for Optimal F1 Score:')
    return results.iloc[best_index]
# Prediction Intervals - Ensemble Scikit-Learn Models
def ensemble_prediction_intervals(model, X, X_train=None, y_train=None, percentile=0.95):
    '''
    Calculates the specified prediction intervals for each prediction
    from an ensemble scikit-learn model.

    Inputs:
        - model: The scikit-learn model to create prediction intervals for. This must be
                 either a RandomForestRegressor or GradientBoostingRegressor
        - X: The input array to create predictions & prediction intervals for
        - X_train: The training features for the gradient boosted trees
        - y_train: The training label for the gradient boosted trees
        - percentile: The prediction interval coverage as a fraction.
                      Default of 0.95 gives the 0.025 - 0.975 interval

    Note: Use X_train and y_train when using a gradient boosted regressor because a copy of
          the model will be re-trained with quantile loss.
          These are not needed for a random forest regressor

    Output: A dataframe with the predictions and prediction intervals for X
    '''
    # Checking if the model has the estimators_ attribute
    if 'estimators_' not in dir(model):
        print('Not an ensemble model - exiting function')
        return

    # Accumulating lower and upper prediction intervals
    lower_PI = []
    upper_PI = []

    # Generating predictions to be returned with prediction intervals
    print('Generating predictions with the model')
    predictions = model.predict(X)

    # bugfix: comparing str(type(model)) against the private module path broke
    # when sklearn moved ensemble.forest/.gradient_boosting in 0.22; the class
    # name is stable across versions
    model_name = type(model).__name__

    # Prediction intervals for a random forest regressor
    # Taken from https://blog.datadive.net/prediction-intervals-for-random-forests/
    if model_name == 'RandomForestRegressor':
        print('Generating upper and lower prediction intervals')

        # Looping through individual records for predictions
        for record in range(len(X)):
            estimator_predictions = []

            # Looping through estimators and gathering predictions
            for estimator in model.estimators_:
                individual_estimator_predictions = estimator.predict(X.iloc[record].values.reshape(1, -1))[0]
                estimator_predictions.append(individual_estimator_predictions)

            # Adding prediction intervals from the spread of the per-tree predictions
            # (bugfix: np.percentile expects a 0-100 scale; fractions were
            # previously passed, collapsing the interval to the extremes)
            lower_PI.append(np.percentile(estimator_predictions, (1 - percentile) / 2. * 100))
            upper_PI.append(np.percentile(estimator_predictions, (1 + percentile) / 2. * 100))

    # Prediction intervals for gradient boosted trees
    # Taken from http://scikit-learn.org/stable/auto_examples/ensemble/plot_gradient_boosting_quantile.html
    if model_name == 'GradientBoostingRegressor':
        # Cloning the model so the original version isn't overwritten
        from sklearn.base import clone
        quantile_model = clone(model)

        # Calculating buffer for upper/lower alpha to get the Xth percentile
        # (bugfix: this previously used an undefined name ``x`` -> NameError)
        alpha_buffer = ((1 - percentile) / 2)
        alpha = percentile + alpha_buffer

        # Setting the loss function to quantile before re-fitting
        quantile_model.set_params(loss='quantile')

        # Upper prediction interval
        print('Generating upper prediction intervals')
        quantile_model.set_params(alpha=alpha)
        quantile_model.fit(X_train, y_train)
        upper_PI = quantile_model.predict(X)

        # Lower prediction interval
        print('Generating lower prediction intervals')
        quantile_model.set_params(alpha=(1 - alpha))
        quantile_model.fit(X_train, y_train)
        lower_PI = quantile_model.predict(X)

    # Compiling results of prediction intervals and the actual predictions
    results = pd.DataFrame({'lower_PI': lower_PI,
                            'prediction': predictions,
                            'upper_PI': upper_PI})

    return results
JeffMacaluso/mlcookbook | mlcookbook/process.py | <reponame>JeffMacaluso/mlcookbook<filename>mlcookbook/process.py
import numpy as np
import pandas as pd
# Principal Component Analysis (PCA)
def fit_PCA(X, num_components=0.99):
    '''
    Min-max normalizes the input data and projects it onto its principal components.

    Inputs:
        - X: An array of values to perform PCA on
        - num_components: If >1, the number of principal components desired
                          If <1, the percentage of variance explained desired
    Outputs:
        - An array of the principal components
    TODO: Add check if data is already normalized
    '''
    from sklearn import preprocessing
    from sklearn.decomposition import PCA

    # Coerce list-like / dataframe inputs into a plain numpy array
    if type(X) != np.ndarray:
        X = np.array(X)

    # Scale every feature into [0, 1] so no single feature dominates the components
    scaled_features = preprocessing.MinMaxScaler().fit_transform(X)

    # Fit the PCA decomposition on the scaled data
    pca_model = PCA(n_components=num_components)
    pca_model.fit(scaled_features)

    # Report how much variance each principal component captures
    variance_pct = pca_model.explained_variance_ratio_ * 100
    print('Total variance % explained:', sum(variance_pct))
    print()
    print('Variance % explained by principal component:')
    for component_index, component_pct in enumerate(variance_pct):
        print(component_index, ':', component_pct)

    # Project the (scaled) data onto the fitted components
    return pca_model.transform(scaled_features)
# Oversampling
def oversample_binary_label(dataframe, label_column):
    '''
    Oversamples a dataframe with a binary label to have an equal proportion in classes.
    Dynamically determines the label with the lower proportion and resamples it (with
    replacement) up to the size of the majority class.

    Inputs:
        - dataframe: A dataframe containing the label
        - label_column: A string of the column containing the label
    Output: A dataframe with the lower proportion label oversampled

    TODO: Update this to oversample the training set and return both the training and testing sets
    '''
    # Counting the observations in each class.
    # value_counts() is sorted descending by count, so index[0] is always the
    # majority label and index[1] the minority label. The previous version paired
    # counts from value_counts() with classes from .unique() (appearance order) and
    # compared the label *value* to 0, which could undersample the majority instead.
    class_counts = dataframe[label_column].value_counts()
    majority_label = class_counts.index[0]
    minority_label = class_counts.index[1]
    majority_count = class_counts.iloc[0]
    # Splitting the dataframe by class
    dataframe_majority = dataframe[dataframe[label_column] == majority_label]
    dataframe_minority = dataframe[dataframe[label_column] == minority_label]
    # Oversampling the minority class with replacement up to the majority count
    dataframe_minority_oversampled = dataframe_minority.sample(majority_count, replace=True)
    dataframe_oversampled = pd.concat([dataframe_majority, dataframe_minority_oversampled], axis=0)
    # Printing results
    print('Initial number of observations in each class:')
    print(dataframe[label_column].value_counts())
    print()
    print('Oversampled number of observations in each class:')
    print(dataframe_oversampled[label_column].value_counts())
    return dataframe_oversampled
def oversample_smote(training_features, training_labels, is_dataframe=True):
    '''
    Convenience function for oversampling with SMOTE (Synthetic Minority Over-sampling
    TEchnique). This generates synthetic minority-class samples via interpolation.
    Automatically encodes categorical columns if a dataframe is provided with categorical
    columns properly marked.

    Inputs:
        - training_features: The training features (dataframe or array)
        - training_labels: The training labels
        - is_dataframe: Whether training_features is a dataframe; used for checking
                        for categorical columns (these must have the "category" dtype)
    Output: The oversampled training features and labels

    NOTE(review): imblearn renamed fit_sample() to fit_resample() (removed in newer
    releases) - confirm which imbalanced-learn version this targets.
    NOTE(review): the rounding step below indexes the resampled features positionally
    ([:, idx]), which assumes fit_sample returns an array - verify this holds for the
    imblearn version in use when a dataframe is passed in.
    '''
    from imblearn import over_sampling
    if is_dataframe == True:
        # Testing if there are any categorical columns
        # Note: These must have the "category" datatype
        categorical_variable_list = training_features.select_dtypes(exclude=['number', 'bool_', 'object_']).columns
        if categorical_variable_list.shape[0] > 0:
            # SMOTE-NC takes the categorical columns by positional index so they
            # are not interpolated like numeric columns
            categorical_variable_list = list(categorical_variable_list)
            categorical_variable_indexes = training_features.columns.get_indexer(categorical_variable_list)
            smote = over_sampling.SMOTENC(categorical_features=categorical_variable_indexes, random_state=46, n_jobs=-1)
        else:
            smote = over_sampling.SMOTE(random_state=46, n_jobs=-1)
    else:
        smote = over_sampling.SMOTE(random_state=46, n_jobs=-1)
    # Performing oversampling
    training_features_oversampled, training_labels_oversampled = smote.fit_sample(training_features, training_labels)
    # Rounding discrete variables for appropriate cutoffs
    # This is because SMOTE NC only deals with binary categorical variables, not discrete variables
    if is_dataframe == True:
        discrete_variable_list = training_features.select_dtypes(include=['int', 'int32', 'int64']).columns
        if discrete_variable_list.shape[0] > 0:
            discrete_variable_indexes = training_features.columns.get_indexer(discrete_variable_list)
            for discrete_variable_index in discrete_variable_indexes:
                # Interpolation can produce fractional values for integer columns; round them back
                training_features_oversampled[:, discrete_variable_index] = np.round(training_features_oversampled[:, discrete_variable_index].astype(float)).astype(int)
    # Reporting the change in size and label balance
    print('Previous training size:', len(training_labels))
    print('Oversampled training size', len(training_labels_oversampled), '\n')
    print('Previous label mean:', training_labels.astype(int).mean())
    print('Oversampled label mean:', training_labels_oversampled.mean())
    return training_features_oversampled, training_labels_oversampled
def target_encode(train_variable, test_variable, train_label, smoothing=1, min_samples_leaf=1, noise_level=0):
    '''
    Mean target encoding using the Micci-Barreca technique from the following paper:
    http://helios.mm.di.uoa.gr/~rouvas/ssi/sigkdd/sigkdd.vol3.1/barreca.pdf
    This function heavily borrows code from Olivier's Kaggle post:
    https://www.kaggle.com/ogrellier/python-target-encoding-for-categorical-features

    Inputs:
        - train_variable (Series): Variable in the training set to perform the encoding on.
        - test_variable (Series): Variable in the testing set to be transformed.
        - train_label (Series): The label in the training set to use for performing the encoding.
        - smoothing (int): Balances the categorical average vs. the prior.
        - min_samples_leaf (int): The minimum number of samples to take the category averages into account.
        - noise_level (int): Amount of Gaussian noise to add in order to help prevent overfitting.
    Output: A (fitted_train_variable, fitted_test_variable) tuple of Series holding the
            encoded values, aligned to the original train/test indexes.
    '''
    def add_noise(series, noise_level):
        '''
        Adds multiplicative Gaussian noise to the data
        '''
        return series * (1 + noise_level * np.random.randn(len(series)))
    assert len(train_variable) == len(train_label)
    assert train_variable.name == test_variable.name
    # Creating a data frame out of the training variable and label in order to get the averages of the label
    # for the training variable
    temp = pd.concat([train_variable, train_label], axis=1)
    # Computing the target mean and count per category
    averages = temp.groupby(train_variable.name)[train_label.name].agg(['mean', 'count'])
    # Computing the smoothing factor: categories with more samples than min_samples_leaf
    # lean towards their own mean, smaller categories lean towards the prior
    smoothing = 1 / (1 + np.exp(-(averages['count'] - min_samples_leaf) / smoothing))
    # Calculating the prior (global label mean) before adding the smoothing
    prior = train_label.mean()
    # Adding the smoothing to the prior to get the posterior
    # Larger samples will take the average into account less
    averages[train_label.name] = prior * (1 - smoothing) + averages['mean'] * smoothing
    # Applying the averages to the training variable
    fitted_train_variable = pd.merge(
        train_variable.to_frame(train_variable.name),
        averages.reset_index().rename(columns={'index': train_label.name, train_label.name: 'average'}),
        on=train_variable.name, how='left')
    # Unseen categories get the prior
    fitted_train_variable = fitted_train_variable['average'].rename(train_variable.name + '_mean').fillna(prior)
    fitted_train_variable.index = train_variable.index  # Restoring the index lost in pd.merge
    # Applying the averages to the testing variable
    fitted_test_variable = pd.merge(
        test_variable.to_frame(test_variable.name),
        averages.reset_index().rename(columns={'index': train_label.name, train_label.name: 'average'}),
        on=test_variable.name, how='left')
    fitted_test_variable = fitted_test_variable['average'].rename(test_variable.name + '_mean').fillna(prior)
    # Bug fix: previously assigned fitted_test_variable.index to itself (a no-op),
    # leaving the merged (reset) index in place instead of the caller's test index
    fitted_test_variable.index = test_variable.index  # Restoring the index lost in pd.merge
    # Adding the noise if there is any
    if noise_level != 0:
        fitted_train_variable = add_noise(fitted_train_variable, noise_level)
        fitted_test_variable = add_noise(fitted_test_variable, noise_level)
    return fitted_train_variable, fitted_test_variable
JeffMacaluso/mlcookbook | mlcookbook/__init__.py | import mlcookbook.eda
import mlcookbook.ml
import mlcookbook.misc
import mlcookbook.nlp
import mlcookbook.plot
import mlcookbook.process
def diagnostics():
    '''
    Reports the current date/time, package versions, and machine hardware

    TODO: Make this work dynamically with imported libraries
    '''
    import sys
    import os
    import time
    import numpy as np
    import pandas as pd
    import sklearn

    # Timestamp first, then a fixed label/value report of environment details
    print(time.strftime('%Y/%m/%d %H:%M'))
    environment_report = [
        ('OS:', sys.platform),
        ('CPU Cores:', os.cpu_count()),
        ('Python:', sys.version),
        ('NumPy:', np.__version__),
        ('Pandas:', pd.__version__),
        ('Scikit-Learn:', sklearn.__version__),
    ]
    for label, value in environment_report:
        print(label, value)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.