content
stringlengths 5
1.05M
|
|---|
import os
def get_project_dir():
    """Return the absolute path of the directory containing this file."""
    here = os.path.abspath(__file__)
    return os.path.dirname(here)
def get_output_dir():
    """Return the project's 'out' directory, creating it on first use."""
    output_path = os.path.join(get_project_dir(), 'out')
    os.makedirs(output_path, exist_ok=True)
    return output_path
def get_data_dir():
    """Return the path of the project's 'data' directory (not created here)."""
    project_root = get_project_dir()
    return os.path.join(project_root, 'data')
|
import unittest
from cassyy import (
CASClient,
CASError,
CASInvalidServiceError,
CASInvalidTicketError,
)
class CASClientTestCase(unittest.TestCase):
    """Tests for cassyy.CASClient: URL construction and CAS response parsing."""

    # Endpoint URLs of a fictitious CAS server; the validate endpoint uses
    # the CAS protocol v3 path.
    cas_login_url = 'https://cas.local/login'
    cas_logout_url = 'https://cas.local/logout'
    cas_validate_url = 'https://cas.local/p3/serviceValidate'
    # Service (application) URL that CAS should redirect back to.
    test_service_url = 'https://foo.org'

    def setUp(self):
        # Fresh client per test, built from the three explicit endpoint URLs.
        self.client = CASClient(
            self.cas_login_url,
            self.cas_logout_url,
            self.cas_validate_url
        )

    def test_from_base_url(self):
        # from_base_url derives the three standard endpoints from one base URL.
        c = CASClient.from_base_url('https://cas.local/')
        self.assertEqual(c.login_url, 'https://cas.local/login')
        self.assertEqual(c.logout_url, 'https://cas.local/logout')
        self.assertEqual(c.validate_url, 'https://cas.local/p3/serviceValidate')

    def test_from_base_url_with_alt_paths(self):
        # Custom endpoint paths work with or without a leading slash and may
        # contain multiple segments.
        c = CASClient.from_base_url('https://cas.local/',
                                    login_path='foo',
                                    logout_path='bar/baz',
                                    validate_path='/qux')
        self.assertEqual(c.login_url, 'https://cas.local/foo')
        self.assertEqual(c.logout_url, 'https://cas.local/bar/baz')
        self.assertEqual(c.validate_url, 'https://cas.local/qux')

    def test_parse_userid(self):
        # A successful validation response yields the authenticated userid.
        s = """
<cas:serviceResponse xmlns:cas='http://www.yale.edu/tp/cas'>
<cas:authenticationSuccess>
<cas:user>jdoe</cas:user>
</cas:authenticationSuccess>
</cas:serviceResponse>
"""
        self.assertEqual('jdoe', self.client.parse_cas_response(s).userid)

    def test_parse_non_xml(self):
        # Non-XML input is reported as CASError with code INVALID_RESPONSE and
        # the underlying ElementTree ParseError as the second exception arg.
        s = "jdoe"
        with self.assertRaises(CASError) as cm:
            self.client.parse_cas_response(s)
        self.assertEqual('INVALID_RESPONSE', cm.exception.error_code)
        self.assertEqual("ParseError('syntax error: line 1, column 0')",
                         str(cm.exception.args[1]))

    def test_parse_invalid_ticket(self):
        # authenticationFailure code INVALID_TICKET maps to CASInvalidTicketError.
        s = """
<cas:serviceResponse xmlns:cas='http://www.yale.edu/tp/cas'>
<cas:authenticationFailure code="INVALID_TICKET">
Ticket 'ST-foo' not recognized
</cas:authenticationFailure>
</cas:serviceResponse>
"""
        with self.assertRaises(CASInvalidTicketError) as cm:
            self.client.parse_cas_response(s)
        self.assertEqual('INVALID_TICKET', cm.exception.error_code)

    def test_parse_invalid_service(self):
        # authenticationFailure code INVALID_SERVICE maps to CASInvalidServiceError.
        s = """
<cas:serviceResponse xmlns:cas='http://www.yale.edu/tp/cas'>
<cas:authenticationFailure code="INVALID_SERVICE">
Ticket 'ST-338345-KTQdtsv9b5WKtfVfrahU-cas3' does not match supplied service. '
The original service was 'https://foo.org' and the supplied service was 'https://foo2.org'.
</cas:authenticationFailure>
</cas:serviceResponse>
"""
        with self.assertRaises(CASInvalidServiceError) as cm:
            self.client.parse_cas_response(s)
        self.assertEqual('INVALID_SERVICE', cm.exception.error_code)

    def test_build_login_url(self):
        # The service URL must be percent-encoded into the query string.
        url = self.client.build_login_url(self.test_service_url)
        self.assertEqual(
            f'{self.cas_login_url}?service=https%3A%2F%2Ffoo.org',
            url
        )

    def test_build_login_url_with_postback(self):
        # callback_post=True adds method=POST to request ticket delivery via POST.
        url = self.client.build_login_url(self.test_service_url, callback_post=True)
        self.assertEqual(
            f'{self.cas_login_url}?service=https%3A%2F%2Ffoo.org&method=POST',
            url
        )

    def test_build_validate_url(self):
        # The validate URL carries both the service and the ticket parameters.
        url = self.client.build_validate_url(self.test_service_url, 'tix')
        self.assertEqual(
            f'{self.cas_validate_url}?service=https%3A%2F%2Ffoo.org&ticket=tix',
            url
        )

    def test_build_logout_url(self):
        # The logout URL carries the (encoded) service to return to.
        url = self.client.build_logout_url(self.test_service_url)
        self.assertEqual(
            f'{self.cas_logout_url}?service=https%3A%2F%2Ffoo.org',
            url
        )

    def test_parse_attributes(self):
        # Values from the <cas:attributes> element are exposed through the
        # parsed user's `attributes` mapping.
        s = """
<cas:serviceResponse xmlns:cas='http://www.yale.edu/tp/cas'>
<cas:authenticationSuccess>
<cas:user>jdoe</cas:user>
<cas:attributes>
<cas:clientIpAddress>10.0.0.2</cas:clientIpAddress>
<cas:isFromNewLogin>true</cas:isFromNewLogin>
<cas:mail>jdoe@foo.org</cas:mail>
<cas:authenticationDate>2022-01-21T23:03:05.920747Z</cas:authenticationDate>
<cas:bypassMultifactorAuthentication>false</cas:bypassMultifactorAuthentication>
<cas:authnContextClass>mfa-example</cas:authnContextClass>
<cas:successfulAuthenticationHandlers>DuoSecurityAuthenticationHandler</cas:successfulAuthenticationHandlers>
<cas:userAgent>Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36</cas:userAgent>
<cas:cn>Jane Doe</cas:cn>
<cas:credentialType>DuoSecurityCredential</cas:credentialType>
<cas:authenticationMethod>DuoSecurityAuthenticationHandler</cas:authenticationMethod>
<cas:serverIpAddress>10.0.0.1</cas:serverIpAddress>
<cas:longTermAuthenticationRequestTokenUsed>false</cas:longTermAuthenticationRequestTokenUsed>
</cas:attributes>
</cas:authenticationSuccess>
</cas:serviceResponse>
"""
        cas_user = self.client.parse_cas_response(s)
        self.assertEqual('jdoe', cas_user.userid)
        self.assertEqual('jdoe@foo.org', cas_user.attributes['mail'])
        self.assertEqual('Jane Doe', cas_user.attributes['cn'])
        self.assertEqual('10.0.0.2', cas_user.attributes['clientIpAddress'])
|
# Read an amount in Brazilian reais and the dollar exchange rate
# (reais per dollar), then print the equivalent amount in dollars.
real = float(input('Entre com valor em real(R$): '))
cotacao = float(input('Entre com a cotação do dolar($): '))
# Bug fix: converting reais into dollars divides by the rate
# (R$ amount / R$-per-US$); the original multiplied, which yields
# a value in reais-squared-per-dollar, not dollars.
conversao = real / cotacao
print(f'O valor R${real} com a cotação atual de R${cotacao}, corresponde ao valor de U${conversao:.2f}')
|
"""
@Author: hezf
@Time: 2021/6/3 14:42
@desc:
"""
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
import torch
import torch.optim as optim
from utils import *
from copy import deepcopy
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from itertools import zip_longest
from collections import Counter
from module import Attention, FGM
class PTModel(nn.Module):
    """Multi-task heads (multi-class + multi-label) on a pretrained BERT encoder.

    From the pooled [CLS] embedding (index 1 of the encoder output) it
    produces: answer-type logits, sigmoid-activated property probabilities
    (multi-label) and entity-class logits.
    """

    def __init__(self, model, ans_class, prop_label, entity_class, dropout_p=0.1):
        super(PTModel, self).__init__()
        self.model = model
        self.layer_norm = nn.LayerNorm(768)
        self.dropout = nn.Dropout(p=dropout_p)
        # One linear head per task.
        self.ans_classifier = nn.Linear(768, ans_class)
        self.prop_classifier = nn.Linear(768, prop_label)
        self.entity_classifier = nn.Linear(768, entity_class)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None):
        encoder_out = self.model(input_ids=input_ids,
                                 token_type_ids=token_type_ids,
                                 attention_mask=attention_mask)
        # Pooled embedding -> LayerNorm -> Dropout, shared by all heads.
        pooled = self.dropout(self.layer_norm(encoder_out[1]))
        ans_logits = self.ans_classifier(pooled)
        prop_logits = torch.sigmoid(self.prop_classifier(pooled))
        entity_logits = self.entity_classifier(pooled)
        return ans_logits, prop_logits, entity_logits
class PTModelTaskAttention(nn.Module):
    """[Default baseline]
    Multi-task heads (multi-class + multi-label) on a pretrained BERT
    encoder, with a separate attention-pooling module per task over the
    full token sequence (index 0 of the encoder output).
    """

    def __init__(self, model, ans_class, prop_label, entity_class, model_config, dropout_p=0.1):
        super(PTModelTaskAttention, self).__init__()
        self.model = model
        self.dropout = nn.Dropout(p=dropout_p)
        self.layer_norm = nn.LayerNorm(model_config.hidden_size)
        # One linear head per task.
        self.ans_classifier = nn.Linear(model_config.hidden_size, ans_class)
        self.prop_classifier = nn.Linear(model_config.hidden_size, prop_label)
        self.entity_classifier = nn.Linear(model_config.hidden_size, entity_class)
        # One attention pooler per task.
        self.ans_attention = Attention(hidden_size=model_config.hidden_size)
        self.prop_attention = Attention(hidden_size=model_config.hidden_size)
        self.entity_attention = Attention(hidden_size=model_config.hidden_size)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None):
        # Positions holding the pad id (0) are masked out of the pooling.
        pad_mask = input_ids == 0
        encoder_out = self.model(input_ids=input_ids,
                                 token_type_ids=token_type_ids,
                                 attention_mask=attention_mask)
        hidden = self.dropout(self.layer_norm(encoder_out[0]))
        ans_logits = self.ans_classifier(self.ans_attention(hidden, pad_mask))
        prop_logits = torch.sigmoid(self.prop_classifier(self.prop_attention(hidden, pad_mask)))
        entity_logits = self.entity_classifier(self.entity_attention(hidden, pad_mask))
        return ans_logits, prop_logits, entity_logits
class PTModelTaskAttentionV1(nn.Module):
    """Baseline plus two extra binary heads for the "activation method"
    and "activation condition" judgments.
    """

    def __init__(self, model, ans_class, prop_label, entity_class, model_config, dropout_p=0.1):
        super(PTModelTaskAttentionV1, self).__init__()
        self.model = model
        self.dropout = nn.Dropout(p=dropout_p)
        self.layer_norm = nn.LayerNorm(model_config.hidden_size)
        # Task heads.
        self.ans_classifier = nn.Linear(model_config.hidden_size, ans_class)
        self.prop_classifier = nn.Linear(model_config.hidden_size, prop_label)
        self.entity_classifier = nn.Linear(model_config.hidden_size, entity_class)
        # Binary classifiers.
        self.method_classifier = nn.Linear(model_config.hidden_size, 2)
        self.condition_classifier = nn.Linear(model_config.hidden_size, 2)
        # One attention pooler per task.
        self.ans_attention = Attention(hidden_size=model_config.hidden_size)
        self.prop_attention = Attention(hidden_size=model_config.hidden_size)
        self.entity_attention = Attention(hidden_size=model_config.hidden_size)
        self.method_attention = Attention(hidden_size=model_config.hidden_size)
        self.condition_attention = Attention(hidden_size=model_config.hidden_size)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None):
        # Positions holding the pad id (0) are masked out of the pooling.
        pad_mask = input_ids == 0
        encoder_out = self.model(input_ids=input_ids,
                                 token_type_ids=token_type_ids,
                                 attention_mask=attention_mask)
        hidden = self.dropout(self.layer_norm(encoder_out[0]))
        ans_logits = self.ans_classifier(self.ans_attention(hidden, pad_mask))
        prop_logits = torch.sigmoid(self.prop_classifier(self.prop_attention(hidden, pad_mask)))
        entity_logits = self.entity_classifier(self.entity_attention(hidden, pad_mask))
        method_logits = self.method_classifier(self.method_attention(hidden, pad_mask))
        condition_logits = self.condition_classifier(self.condition_attention(hidden, pad_mask))
        return ans_logits, prop_logits, entity_logits, method_logits, condition_logits
class PTModelBiClassifier(nn.Module):
    """Single binary-classification head, used for the "activation method" /
    "activation condition" yes-no judgments.
    """

    def __init__(self, model, model_config, dropout_p=0.1):
        super(PTModelBiClassifier, self).__init__()
        self.model = model
        self.attention = Attention(hidden_size=model_config.hidden_size)
        self.dropout = nn.Dropout(p=dropout_p)
        self.layer_norm = nn.LayerNorm(model_config.hidden_size)
        self.classifier = nn.Linear(model_config.hidden_size, 2)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None):
        encoder_out = self.model(input_ids=input_ids,
                                 token_type_ids=token_type_ids,
                                 attention_mask=attention_mask)
        # Prefer the pooled output when the encoder returns more than one
        # tensor; fall back to the sequence output otherwise.
        hidden = encoder_out[1] if len(encoder_out) > 1 else encoder_out[0]
        pad_mask = input_ids == 0
        pooled = self.attention(hidden, pad_mask)
        pooled = self.dropout(self.layer_norm(pooled))
        return self.classifier(pooled)
class PTModelAttention(nn.Module):
    """Multi-task heads (multi-class + multi-label) where each head blends
    16 slot-wise predictions with learned softmax weights.
    """

    def __init__(self, model, ans_class, prop_label, entity_class, dropout_p=0.1):
        super(PTModelAttention, self).__init__()
        self.ans_class = ans_class
        self.prop_label = prop_label
        self.entity_class = entity_class
        self.model = model
        self.dropout = nn.Dropout(p=dropout_p)
        # Per-task slot-weight layers (16 slots each).
        self.attention_ans = nn.Linear(768, 16)
        self.attention_prop = nn.Linear(768, 16)
        self.attention_entity = nn.Linear(768, 16)
        # Per-task slot-wise classifiers (16 predictions per class set).
        self.ans_classifier = nn.Linear(768, 16*ans_class)
        self.prop_classifier = nn.Linear(768, 16*prop_label)
        self.entity_classifier = nn.Linear(768, 16*entity_class)

    def _mix(self, pooled, attention, classifier, n_out):
        # (B, 1, 16) softmax weights over the 16 prediction slots.
        weights = torch.softmax(attention(pooled), dim=-1).unsqueeze(dim=1)
        # (B, 16, n_out) slot-wise logits, blended by the weights via bmm.
        slots = classifier(pooled).view(-1, 16, n_out).contiguous()
        return torch.bmm(weights, slots).squeeze(1)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None):
        encoder_out = self.model(input_ids=input_ids,
                                 token_type_ids=token_type_ids,
                                 attention_mask=attention_mask)
        pooled = self.dropout(encoder_out[1])
        ans_logits = self._mix(pooled, self.attention_ans, self.ans_classifier, self.ans_class)
        prop_logits = torch.sigmoid(self._mix(pooled, self.attention_prop, self.prop_classifier, self.prop_label))
        entity_logits = self._mix(pooled, self.attention_entity, self.entity_classifier, self.entity_class)
        return ans_logits, prop_logits, entity_logits
class PTModelForProperty(nn.Module):
    """Stand-alone multi-label (property) head: 16 weighted prediction
    slots blended into sigmoid probabilities.
    """

    def __init__(self, model, prop_label, dropout_p=0.1):
        super(PTModelForProperty, self).__init__()
        self.prop_label = prop_label
        self.model = model
        self.dropout = nn.Dropout(p=dropout_p)
        self.attention = nn.Linear(768, 16)
        self.prop_classifier = nn.Linear(768, prop_label*16)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None):
        encoder_out = self.model(input_ids=input_ids,
                                 token_type_ids=token_type_ids,
                                 attention_mask=attention_mask)
        pooled = self.dropout(encoder_out[1])
        # (B, 1, 16) weights over the 16 slots; (B, 16, prop_label) slot logits.
        weights = torch.softmax(self.attention(pooled), dim=-1).unsqueeze(dim=1)
        slots = self.prop_classifier(pooled).view(-1, 16, self.prop_label).contiguous()
        blended = torch.bmm(weights, slots).squeeze(1)
        return torch.sigmoid(blended)
class PTModelForPropertyV2(nn.Module):
    """Multi-label task realized as `prop_label` independent binary
    classifiers; returns the stacked logits reshaped to (prop_label * B, 2).
    """

    def __init__(self, model, prop_label, dropout_p=0.1):
        super(PTModelForPropertyV2, self).__init__()
        self.prop_label = prop_label
        self.model = model
        self.dropout = nn.Dropout(p=dropout_p)
        self.classifier_list = nn.ModuleList([nn.Linear(768, 2) for _ in range(prop_label)])
        self.layer_norm = nn.LayerNorm(768)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None):
        encoder_out = self.model(input_ids=input_ids,
                                 token_type_ids=token_type_ids,
                                 attention_mask=attention_mask)
        # LayerNorm is applied after dropout on the pooled embedding.
        pooled = self.layer_norm(self.dropout(encoder_out[1]))
        per_label = [classifier(pooled) for classifier in self.classifier_list]
        return torch.stack(per_label).view(-1, 2).contiguous()
class PTModelForMultiClassification(nn.Module):
    """Stand-alone multi-class head over 16 weighted prediction slots."""

    def __init__(self, model, n_class, dropout_p=0.1):
        super(PTModelForMultiClassification, self).__init__()
        self.n_class = n_class
        self.model = model
        self.attention = nn.Linear(768, 16)
        self.dropout = nn.Dropout(p=dropout_p)
        self.classifier = nn.Linear(768, 16*n_class)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None):
        encoder_out = self.model(input_ids=input_ids,
                                 token_type_ids=token_type_ids,
                                 attention_mask=attention_mask)
        pooled = self.dropout(encoder_out[1])
        # (B, 1, 16) weights over the 16 slots; (B, 16, n_class) slot logits.
        weights = torch.softmax(self.attention(pooled), dim=-1).unsqueeze(dim=1)
        slots = self.classifier(pooled).view(-1, 16, self.n_class).contiguous()
        return torch.bmm(weights, slots).squeeze(1)
class BILSTM_CRF_Model(object):
    """Training / evaluation driver for the BiLSTM(-CRF) sequence tagger."""

    def __init__(self, vocab_size, out_size, gpu_id=0, crf=True):
        """Train and test the LSTM model.

        Args:
            vocab_size: vocabulary size
            out_size: number of tag classes
            gpu_id: CUDA device index used when a GPU is available
            crf: whether the CRF layer is used (affects decoding length below)
        """
        self.device = torch.device(
            "cuda:{}".format(gpu_id) if torch.cuda.is_available() else "cpu")
        # Model hyper-parameters.
        self.emb_size = LSTMConfig.emb_size
        self.hidden_size = LSTMConfig.hidden_size
        self.crf = crf
        self.model = BiLSTM_CRF(vocab_size, self.emb_size,
                                self.hidden_size, out_size).to(self.device)
        self.cal_loss_func = cal_lstm_crf_loss
        # Training hyper-parameters.
        self.epoches = TrainingConfig.epoches
        self.print_step = TrainingConfig.print_step
        self.lr = TrainingConfig.lr
        self.batch_size = TrainingConfig.batch_size
        # Optimizer.
        self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)
        # Learning-rate scheduler.
        self.scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[20, 30, 40], gamma=0.5)
        # Other bookkeeping.
        self.step = 0
        self._best_val_loss = 1e18
        self.best_model = None
        # FGM adversarial-training helper; only used by the commented-out
        # attack in train_step.
        self.fgm = FGM(self.model)

    def train(self, word_lists, tag_lists,
              dev_word_lists, dev_tag_lists,
              word2id, tag2id, debug='0'):
        """Run the full training loop, validating (and keeping the best
        model) after every epoch. `debug == '1'` enables progress prints."""
        # Sort both datasets by sequence length (required for packing).
        word_lists, tag_lists, _ = sort_by_lengths(word_lists, tag_lists)
        dev_word_lists, dev_tag_lists, _ = sort_by_lengths(
            dev_word_lists, dev_tag_lists)
        B = self.batch_size
        for e in range(1, self.epoches + 1):
            self.step = 0
            losses = 0.
            for ind in range(0, len(word_lists), B):
                batch_sents = word_lists[ind:ind + B]
                batch_tags = tag_lists[ind:ind + B]
                losses += self.train_step(batch_sents,
                                          batch_tags, word2id, tag2id)
                if self.step % TrainingConfig.print_step == 0:
                    total_step = (len(word_lists) // B + 1)
                    if debug == '1':
                        print("Epoch {}, step/total_step: {}/{} {:.2f}% Loss:{:.4f}".format(
                            e, self.step, total_step,
                            100. * self.step / total_step,
                            losses / self.print_step
                        ))
                    losses = 0.
            self.scheduler.step()
            # After each epoch, evaluate on the dev set; keep the best model.
            val_loss = self.validate(
                dev_word_lists, dev_tag_lists, word2id, tag2id, debug)
            if debug == '1':
                print("Epoch {}, Val Loss:{:.4f}".format(e, val_loss))

    def train_step(self, batch_sents, batch_tags, word2id, tag2id):
        """Run one optimization step on a single batch; return the loss value."""
        self.model.train()
        self.step += 1
        # Prepare the data.
        tensorized_sents, lengths = tensorized(batch_sents, word2id)
        tensorized_sents = tensorized_sents.to(self.device)
        targets, lengths = tensorized(batch_tags, tag2id)
        targets = targets.to(self.device)
        # forward
        scores = self.model(tensorized_sents, lengths)
        # Compute the loss and update the parameters.
        self.optimizer.zero_grad()
        loss = self.cal_loss_func(scores, targets, tag2id).to(self.device)
        loss.backward()
        # TODO adversarial attack not yet validated on the test set
        # self.fgm.attack('embedding')
        # scores_atk = self.model(tensorized_sents, lengths)
        # loss_atk = self.cal_loss_func(scores_atk, targets, tag2id).to(self.device)
        # loss_atk.backward()
        # self.fgm.restore()
        self.optimizer.step()
        return loss.item()

    def validate(self, dev_word_lists, dev_tag_lists, word2id, tag2id, debug):
        """Compute the mean dev-set loss; keep a deep copy of the best model."""
        self.model.eval()
        with torch.no_grad():
            val_losses = 0.
            val_step = 0
            for ind in range(0, len(dev_word_lists), self.batch_size):
                val_step += 1
                # Prepare the batch data.
                batch_sents = dev_word_lists[ind:ind + self.batch_size]
                batch_tags = dev_tag_lists[ind:ind + self.batch_size]
                tensorized_sents, lengths = tensorized(
                    batch_sents, word2id)
                tensorized_sents = tensorized_sents.to(self.device)
                targets, lengths = tensorized(batch_tags, tag2id)
                targets = targets.to(self.device)
                # forward
                scores = self.model(tensorized_sents, lengths)
                # Compute the loss.
                loss = self.cal_loss_func(
                    scores, targets, tag2id).to(self.device)
                val_losses += loss.item()
            val_loss = val_losses / val_step
            if val_loss < self._best_val_loss:
                if debug == '1':
                    print("保存模型...")
                self.best_model = deepcopy(self.model)
                self._best_val_loss = val_loss
            return val_loss

    def test(self, word_lists, tag_lists, word2id, tag2id):
        """Return the best model's predictions on the test set."""
        # Prepare the data.
        word_lists, tag_lists, indices = sort_by_lengths(word_lists, tag_lists)
        tensorized_sents, lengths = tensorized(word_lists, word2id)
        tensorized_sents = tensorized_sents.to(self.device)
        self.best_model.eval()
        with torch.no_grad():
            batch_tagids = self.best_model.test(
                tensorized_sents, lengths, tag2id)
        # Map tag ids back to tag strings.
        pred_tag_lists = []
        id2tag = dict((id_, tag) for tag, id_ in tag2id.items())
        for i, ids in enumerate(batch_tagids):
            tag_list = []
            if self.crf:
                for j in range(lengths[i] - 1):  # CRF decoding drops the trailing <end> tag
                    tag_list.append(id2tag[ids[j].item()])
            else:
                for j in range(lengths[i]):
                    tag_list.append(id2tag[ids[j].item()])
            pred_tag_lists.append(tag_list)
        # `indices` records the index mapping produced by the length sort.
        # E.g. if indices == [1, 2, 0], the element originally at index 1
        # moved to index 0, the one at index 2 moved to index 1, and so on.
        # Restore pred_tag_lists and tag_lists to the original order below.
        ind_maps = sorted(list(enumerate(indices)), key=lambda e: e[1])
        indices, _ = list(zip(*ind_maps))
        pred_tag_lists = [pred_tag_lists[i] for i in indices]
        tag_lists = [tag_lists[i] for i in indices]
        return pred_tag_lists, tag_lists

    def test_k_pre(self, word_lists, tag_lists, word2id):
        """First half of `test`, split for k-fold use: `pre` returns raw
        scores; `post` decodes (aggregated) scores into tags."""
        # Prepare the data.
        word_lists, tag_lists, _ = sort_by_lengths(word_lists, tag_lists)
        tensorized_sents, lengths = tensorized(word_lists, word2id)
        tensorized_sents = tensorized_sents.to(self.device)
        self.best_model.eval()
        with torch.no_grad():
            score = self.best_model.test_k_pre(
                tensorized_sents, lengths)
        return score

    def test_k_post(self, word_lists, tag_lists, word2id, tag2id, score):
        """Second half of `test` for k-fold use: decode the aggregated
        scores. `score` should be the mean or max of the 5 folds' scores."""
        word_lists, tag_lists, indices = sort_by_lengths(word_lists, tag_lists)
        tensorized_sents, lengths = tensorized(word_lists, word2id)
        batch_tagids = self.best_model.test_k_post(score, lengths, tag2id)
        # Map tag ids back to tag strings.
        pred_tag_lists = []
        id2tag = dict((id_, tag) for tag, id_ in tag2id.items())
        for i, ids in enumerate(batch_tagids):
            tag_list = []
            if self.crf:
                for j in range(lengths[i] - 1):  # CRF decoding drops the trailing <end> tag
                    tag_list.append(id2tag[ids[j].item()])
                else:
                    pass
            else:
                for j in range(lengths[i]):
                    tag_list.append(id2tag[ids[j].item()])
            pred_tag_lists.append(tag_list)
        # Restore the original (pre-sort) order; see `test` for how
        # `indices` encodes the mapping.
        ind_maps = sorted(list(enumerate(indices)), key=lambda e: e[1])
        indices, _ = list(zip(*ind_maps))
        pred_tag_lists = [pred_tag_lists[i] for i in indices]
        tag_lists = [tag_lists[i] for i in indices]
        return pred_tag_lists, tag_lists
class BiLSTM_CRF(nn.Module):
    """BiLSTM encoder with an additive CRF transition matrix on top."""

    def __init__(self, vocab_size, emb_size, hidden_size, out_size):
        """Initialize parameters:
            vocab_size: dictionary size
            emb_size: word-embedding dimensionality
            hidden_size: hidden-vector dimensionality
            out_size: number of tag classes
        """
        super(BiLSTM_CRF, self).__init__()
        self.embedding = nn.Embedding(vocab_size, emb_size)
        self.bilstm = nn.LSTM(emb_size, hidden_size,
                              batch_first=True,
                              bidirectional=True)
        self.lin = nn.Linear(2*hidden_size, out_size)
        # The CRF just learns one extra transition matrix
        # [out_size, out_size], initialized to a uniform distribution.
        self.transition = nn.Parameter(
            torch.ones(out_size, out_size) * 1/out_size)
        # self.transition.data.zero_()

    def forward(self, sents_tensor, lengths):
        # Added to suppress the non-contiguous-weights warning.
        self.bilstm.flatten_parameters()
        # [B, L, out_size]
        emb = self.embedding(sents_tensor)  # [B, L, emb_size]
        packed = pack_padded_sequence(emb, lengths, batch_first=True)
        rnn_out, _ = self.bilstm(packed)
        # rnn_out: [B, L, hidden_size*2]
        rnn_out, _ = pad_packed_sequence(rnn_out, batch_first=True)
        emission = self.lin(rnn_out)  # [B, L, out_size]
        # Compute CRF scores of size [B, L, out_size, out_size]: each token
        # gets an [out_size, out_size] matrix whose element (i, j) is the
        # score of tag i at the previous step followed by tag j now.
        batch_size, max_len, out_size = emission.size()
        crf_scores = emission.unsqueeze(
            2).expand(-1, -1, out_size, -1) + self.transition.unsqueeze(0)
        return crf_scores

    def test(self, test_sents_tensor, lengths, tag2id):
        """Decode with the Viterbi algorithm."""
        start_id = tag2id['<start>']
        end_id = tag2id['<end>']
        pad = tag2id['<pad>']
        tagset_size = len(tag2id)
        crf_scores = self.forward(test_sents_tensor, lengths)
        device = crf_scores.device
        # B: batch_size, L: max_len, T: target-set size
        B, L, T, _ = crf_scores.size()
        # viterbi[i, j, k]: best path score of sentence i ending with tag k
        # at token j.
        viterbi = torch.zeros(B, L, T).to(device)
        # backpointer[i, j, k]: previous tag id on the best path for
        # sentence i, token j, tag k — used for backtracking.
        backpointer = (torch.zeros(B, L, T).long() * end_id).to(device)
        lengths = torch.LongTensor(lengths).to(device)
        # Forward recursion. batch_size_t restricts each step to the
        # sentences still long enough (input is sorted by length).
        for step in range(L):
            batch_size_t = (lengths > step).sum().item()
            if step == 0:
                # The first token's previous tag can only be start_id.
                viterbi[:batch_size_t, step,
                        :] = crf_scores[: batch_size_t, step, start_id, :]
                backpointer[: batch_size_t, step, :] = start_id
            else:
                max_scores, prev_tags = torch.max(
                    viterbi[:batch_size_t, step-1, :].unsqueeze(2) +
                    crf_scores[:batch_size_t, step, :, :],  # [B, T, T]
                    dim=1
                )
                viterbi[:batch_size_t, step, :] = max_scores
                backpointer[:batch_size_t, step, :] = prev_tags
        # Only the backpointer matrix is needed for backtracking.
        backpointer = backpointer.view(B, -1)  # [B, L * T]
        tagids = []  # decoded result
        tags_t = None
        for step in range(L-1, 0, -1):
            batch_size_t = (lengths > step).sum().item()
            if step == L-1:
                index = torch.ones(batch_size_t).long() * (step * tagset_size)
                index = index.to(device)
                index += end_id
            else:
                prev_batch_size_t = len(tags_t)
                # Sentences that end exactly here enter the backtrack batch
                # with <end> as their current tag.
                new_in_batch = torch.LongTensor(
                    [end_id] * (batch_size_t - prev_batch_size_t)).to(device)
                offset = torch.cat(
                    [tags_t, new_in_batch],
                    dim=0
                )  # this offset is effectively the previous step's tags
                index = torch.ones(batch_size_t).long() * (step * tagset_size)
                index = index.to(device)
                index += offset.long()
            try:
                tags_t = backpointer[:batch_size_t].gather(
                    dim=1, index=index.unsqueeze(1).long())
            except RuntimeError:
                # NOTE(review): debugging hook left in place by the author.
                import pdb
                pdb.set_trace()
            tags_t = tags_t.squeeze(1)
            tagids.append(tags_t.tolist())
        # tagids is a list of length L-1 (the end token is dropped); each
        # element holds the batch's tags at that time step.
        # Fix the ordering below and reshape to [B, L].
        tagids = list(zip_longest(*reversed(tagids), fillvalue=pad))
        tagids = torch.Tensor(tagids).long()
        # Return the decoded result.
        return tagids

    def test_k_pre(self, test_sents_tensor, lengths):
        # Return raw CRF scores so k-fold results can be aggregated before
        # decoding (see test_k_post).
        crf_scores = self.forward(test_sents_tensor, lengths)
        return crf_scores

    def test_k_post(self, crf_scores, lengths, tag2id):
        """Decode `crf_scores` (e.g. the k-fold mean/max) with Viterbi."""
        start_id = tag2id['<start>']
        end_id = tag2id['<end>']
        pad = tag2id['<pad>']
        tagset_size = len(tag2id)
        device = crf_scores.device
        # B: batch_size, L: max_len, T: target-set size
        B, L, T, _ = crf_scores.size()
        # viterbi[i, j, k]: best path score of sentence i ending with tag k
        # at token j.
        viterbi = torch.zeros(B, L, T).to(device)
        # backpointer[i, j, k]: previous tag id on the best path.
        backpointer = (torch.zeros(B, L, T).long() * end_id).to(device)
        lengths = torch.LongTensor(lengths).to(device)
        # Forward recursion (identical to `test`, minus the forward pass).
        for step in range(L):
            batch_size_t = (lengths > step).sum().item()
            if step == 0:
                # The first token's previous tag can only be start_id.
                viterbi[:batch_size_t, step,
                        :] = crf_scores[: batch_size_t, step, start_id, :]
                backpointer[: batch_size_t, step, :] = start_id
            else:
                max_scores, prev_tags = torch.max(
                    viterbi[:batch_size_t, step - 1, :].unsqueeze(2) +
                    crf_scores[:batch_size_t, step, :, :],  # [B, T, T]
                    dim=1
                )
                viterbi[:batch_size_t, step, :] = max_scores
                backpointer[:batch_size_t, step, :] = prev_tags
        # Only the backpointer matrix is needed for backtracking.
        backpointer = backpointer.view(B, -1)  # [B, L * T]
        tagids = []  # decoded result
        tags_t = None
        for step in range(L - 1, 0, -1):
            batch_size_t = (lengths > step).sum().item()
            if step == L - 1:
                index = torch.ones(batch_size_t).long() * (step * tagset_size)
                index = index.to(device)
                index += end_id
            else:
                prev_batch_size_t = len(tags_t)
                new_in_batch = torch.LongTensor(
                    [end_id] * (batch_size_t - prev_batch_size_t)).to(device)
                offset = torch.cat(
                    [tags_t, new_in_batch],
                    dim=0
                )  # this offset is effectively the previous step's tags
                index = torch.ones(batch_size_t).long() * (step * tagset_size)
                index = index.to(device)
                index += offset.long()
            try:
                tags_t = backpointer[:batch_size_t].gather(
                    dim=1, index=index.unsqueeze(1).long())
            except RuntimeError:
                # NOTE(review): debugging hook left in place by the author.
                import pdb
                pdb.set_trace()
            tags_t = tags_t.squeeze(1)
            tagids.append(tags_t.tolist())
        # tagids is a list of length L-1 (the end token is dropped); each
        # element holds the batch's tags at that time step.
        # Fix the ordering below and reshape to [B, L].
        tagids = list(zip_longest(*reversed(tagids), fillvalue=pad))
        tagids = torch.Tensor(tagids).long()
        # Return the decoded result.
        return tagids
class BERT_BILSTM_CRF_Model(object):
    """Training / evaluation driver for the BERT(+CRF) sequence tagger."""

    def __init__(self, vocab_size, out_size, bert_model, gpu_id=0, crf=True):
        """Train and test the model.

        Args:
            vocab_size: vocabulary size
            out_size: number of tag classes
            bert_model: pretrained BERT encoder supplying token embeddings
            gpu_id: CUDA device index used when a GPU is available
            crf: whether the CRF layer is used (affects decoding length below)
        """
        self.device = torch.device(
            "cuda:{}".format(gpu_id) if torch.cuda.is_available() else "cpu")
        # Model hyper-parameters.
        self.emb_size = BERTLSTMConfig.emb_size
        self.hidden_size = BERTLSTMConfig.hidden_size
        self.crf = crf
        self.model = BERT_BiLSTM_CRF(vocab_size, self.emb_size,
                                     self.hidden_size, out_size, bert_model).to(self.device)
        self.bert_model = bert_model
        self.cal_loss_func = cal_lstm_crf_loss
        # Training hyper-parameters.
        self.epoches = BertTrainingConfig.epoches
        self.print_step = BertTrainingConfig.print_step
        self.lr = BertTrainingConfig.lr
        self.other_lr = BertTrainingConfig.other_lr
        self.batch_size = BertTrainingConfig.batch_size
        # Optimizer with layer-wise (differential) learning rates.
        self.optimizer = self._build_optimizer()  # optim.Adam(self.model.parameters(), lr=self.lr)
        # Learning-rate scheduler.
        self.scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[20, 30], gamma=0.5)
        # Other bookkeeping.
        self.step = 0
        self._best_val_loss = 1e18
        self.best_model = None

    def _build_optimizer(self):
        """Build Adam with differential learning rates: `lr` for parameters
        under `bert_model`, `other_lr` for everything else."""
        module = (self.model.module if hasattr(self.model, "module") else self.model)
        # Differential learning rates.
        no_decay = ["bias", "LayerNorm.weight"]
        model_param = list(module.named_parameters())
        bert_param_optimizer = []
        other_param_optimizer = []
        for name, para in model_param:
            space = name.split('.')
            if space[0] == 'bert_model':
                bert_param_optimizer.append((name, para))
            else:
                other_param_optimizer.append((name, para))
        optimizer_grouped_parameters = [
            # BERT modules.
            {"params": [p for n, p in bert_param_optimizer if not any(nd in n for nd in no_decay)],
             "weight_decay": 0.0, 'lr': self.lr},
            {"params": [p for n, p in bert_param_optimizer if any(nd in n for nd in no_decay)],
             "weight_decay": 0.0, 'lr': self.lr},
            # Other modules, with the differential learning rate.
            {"params": [p for n, p in other_param_optimizer if not any(nd in n for nd in no_decay)],
             "weight_decay": 0.0, 'lr': self.other_lr},
            {"params": [p for n, p in other_param_optimizer if any(nd in n for nd in no_decay)],
             "weight_decay": 0.0, 'lr': self.other_lr},
        ]
        optimizer = torch.optim.Adam(optimizer_grouped_parameters, lr=self.lr)
        return optimizer

    def train(self, word_lists, tag_lists,
              dev_word_lists, dev_tag_lists,
              word2id, tag2id):
        """Run the full training loop, validating (and keeping the best
        model) after every epoch."""
        # Sort both datasets by sequence length.
        word_lists, tag_lists, _ = sort_by_lengths(word_lists, tag_lists)
        dev_word_lists, dev_tag_lists, _ = sort_by_lengths(
            dev_word_lists, dev_tag_lists)
        B = self.batch_size
        for e in range(1, self.epoches + 1):
            self.step = 0
            losses = 0.
            for ind in range(0, len(word_lists), B):
                batch_sents = word_lists[ind:ind + B]
                batch_tags = tag_lists[ind:ind + B]
                losses += self.train_step(batch_sents,
                                          batch_tags, word2id, tag2id)
                if self.step % BertTrainingConfig.print_step == 0:
                    total_step = (len(word_lists) // B + 1)
                    # print("Epoch {}, step/total_step: {}/{} {:.2f}% Loss:{:.4f}".format(
                    #     e, self.step, total_step,
                    #     100. * self.step / total_step,
                    #     losses / self.print_step
                    # ))
                    losses = 0.
            self.scheduler.step()
            # After each epoch, evaluate on the dev set; keep the best model.
            val_loss = self.validate(
                dev_word_lists, dev_tag_lists, word2id, tag2id)
            print("Epoch {}, Val Loss:{:.4f}".format(e, val_loss))

    def train_step(self, batch_sents, batch_tags, word2id, tag2id):
        """Run one optimization step on a single batch; return the loss value."""
        self.model.train()
        self.step += 1
        # Prepare the data (BERT-style tensorization for the inputs).
        tensorized_sents, lengths = tensorized_bert(batch_sents, word2id)
        tensorized_sents = tensorized_sents.to(self.device)
        targets, lengths = tensorized(batch_tags, tag2id)
        targets = targets.to(self.device)
        # forward
        scores = self.model(tensorized_sents, lengths)
        # Compute the loss and update the parameters.
        self.optimizer.zero_grad()
        loss = self.cal_loss_func(scores, targets, tag2id).to(self.device)
        loss.backward()
        self.optimizer.step()
        return loss.item()

    def validate(self, dev_word_lists, dev_tag_lists, word2id, tag2id):
        """Compute the mean dev-set loss; keep a deep copy of the best model."""
        self.model.eval()
        with torch.no_grad():
            val_losses = 0.
            val_step = 0
            for ind in range(0, len(dev_word_lists), self.batch_size):
                val_step += 1
                # Prepare the batch data.
                batch_sents = dev_word_lists[ind:ind + self.batch_size]
                batch_tags = dev_tag_lists[ind:ind + self.batch_size]
                tensorized_sents, lengths = tensorized_bert(
                    batch_sents, word2id)
                tensorized_sents = tensorized_sents.to(self.device)
                targets, lengths = tensorized(batch_tags, tag2id)
                targets = targets.to(self.device)
                # forward
                scores = self.model(tensorized_sents, lengths)
                # Compute the loss.
                loss = self.cal_loss_func(
                    scores, targets, tag2id).to(self.device)
                val_losses += loss.item()
            val_loss = val_losses / val_step
            if val_loss < self._best_val_loss:
                print("保存模型...")
                self.best_model = deepcopy(self.model)
                self._best_val_loss = val_loss
            return val_loss

    def test(self, word_lists, tag_lists, word2id, tag2id):
        """Return the best model's predictions on the test set."""
        # Prepare the data.
        word_lists, tag_lists, indices = sort_by_lengths(word_lists, tag_lists)
        tensorized_sents, lengths = tensorized_bert(word_lists, word2id)
        tensorized_sents = tensorized_sents.to(self.device)
        self.best_model.eval()
        with torch.no_grad():
            batch_tagids = self.best_model.test(
                tensorized_sents, lengths, tag2id)
        # Map tag ids back to tag strings.
        pred_tag_lists = []
        id2tag = dict((id_, tag) for tag, id_ in tag2id.items())
        for i, ids in enumerate(batch_tagids):
            tag_list = []
            if self.crf:
                for j in range(lengths[i] - 1):  # CRF decoding drops the trailing <end> tag
                    tag_list.append(id2tag[ids[j].item()])
            else:
                for j in range(lengths[i]):
                    tag_list.append(id2tag[ids[j].item()])
            pred_tag_lists.append(tag_list)
        # `indices` records the index mapping produced by the length sort.
        # E.g. if indices == [1, 2, 0], the element originally at index 1
        # moved to index 0, the one at index 2 moved to index 1, and so on.
        # Restore pred_tag_lists and tag_lists to the original order below.
        ind_maps = sorted(list(enumerate(indices)), key=lambda e: e[1])
        indices, _ = list(zip(*ind_maps))
        pred_tag_lists = [pred_tag_lists[i] for i in indices]
        tag_lists = [tag_lists[i] for i in indices]
        return pred_tag_lists, tag_lists
class BERT_BiLSTM_CRF(nn.Module):
    """BERT encoder -> linear emission layer -> CRF transition scores.

    Despite the name, the BiLSTM is currently bypassed in ``forward`` (the
    BERT hidden states feed the linear layer directly); the embedding/LSTM
    modules are kept so existing checkpoints still load.
    """

    def __init__(self, vocab_size, emb_size, hidden_size, out_size, bert_model):
        """Initialize parameters.

        vocab_size: size of the vocabulary
        emb_size: word-embedding dimension
        hidden_size: hidden-state dimension of the (unused) BiLSTM
        out_size: number of tag labels
        bert_model: pretrained BERT model producing the token representations
        """
        super(BERT_BiLSTM_CRF, self).__init__()
        # NOTE: embedding and bilstm are not used in forward() — kept only for
        # checkpoint compatibility.
        self.embedding = nn.Embedding(vocab_size, emb_size)
        self.bilstm = nn.LSTM(emb_size, hidden_size,
                              batch_first=True,
                              bidirectional=True)
        # Emission layer; assumes BERT's hidden size equals emb_size — TODO confirm.
        self.lin = nn.Linear(emb_size, out_size)
        self.bert_model = bert_model
        # The CRF is just one extra transition matrix [out_size, out_size],
        # initialized to a uniform distribution.
        self.transition = nn.Parameter(
            torch.ones(out_size, out_size) * 1 / out_size)

    def forward(self, sents_tensor, lengths):
        """Return CRF scores of shape [B, L, out_size, out_size]."""
        # flatten LSTM weights to silence the non-contiguous-weights warning
        self.bilstm.flatten_parameters()
        input_ids = sents_tensor
        device = input_ids.device
        token_type_ids = torch.zeros(input_ids.shape).to(torch.int64).to(device)
        one = torch.ones(input_ids.shape).to(torch.int64).to(device)
        # attend only to non-padding positions (token id > 0)
        attention_mask = torch.where(input_ids > 0, one, token_type_ids)
        emb = self.bert_model(input_ids=input_ids, token_type_ids=token_type_ids,
                              attention_mask=attention_mask)[0]
        emission = self.lin(emb)  # [B, L, out_size]
        # CRF scores: entry [b, l, i, j] is the score of moving from tag i at
        # the previous step to tag j at step l.
        batch_size, max_len, out_size = emission.size()
        crf_scores = emission.unsqueeze(
            2).expand(-1, -1, out_size, -1) + self.transition.unsqueeze(0)
        return crf_scores

    def test(self, test_sents_tensor, lengths, tag2id):
        """Decode the best tag sequences with the Viterbi algorithm."""
        start_id = tag2id['<start>']
        end_id = tag2id['<end>']
        pad = tag2id['<pad>']
        tagset_size = len(tag2id)

        crf_scores = self.forward(test_sents_tensor, lengths)
        device = crf_scores.device
        # B: batch size, L: max length, T: tagset size
        B, L, T, _ = crf_scores.size()
        # viterbi[i, j, k]: best score for sentence i, position j, tag k
        viterbi = torch.zeros(B, L, T).to(device)
        # backpointer[i, j, k]: previous tag id on the best path; unwritten
        # cells are never read during backtracking
        backpointer = (torch.zeros(B, L, T).long() * end_id).to(device)
        lengths = torch.LongTensor(lengths).to(device)
        # forward recursion
        for step in range(L):
            batch_size_t = (lengths > step).sum().item()
            if step == 0:
                # the first token can only be preceded by <start>
                viterbi[:batch_size_t, step,
                        :] = crf_scores[:batch_size_t, step, start_id, :]
                backpointer[:batch_size_t, step, :] = start_id
            else:
                max_scores, prev_tags = torch.max(
                    viterbi[:batch_size_t, step - 1, :].unsqueeze(2) +
                    crf_scores[:batch_size_t, step, :, :],  # [B, T, T]
                    dim=1
                )
                viterbi[:batch_size_t, step, :] = max_scores
                backpointer[:batch_size_t, step, :] = prev_tags
        # only the backpointers are needed for backtracking
        backpointer = backpointer.view(B, -1)  # [B, L * T]
        tagids = []  # decoded tags, collected back-to-front
        tags_t = None
        for step in range(L - 1, 0, -1):
            batch_size_t = (lengths > step).sum().item()
            if step == L - 1:
                index = torch.ones(batch_size_t).long() * (step * tagset_size)
                index = index.to(device)
                index += end_id
            else:
                # sentences that just entered the active batch start from <end>
                prev_batch_size_t = len(tags_t)
                new_in_batch = torch.LongTensor(
                    [end_id] * (batch_size_t - prev_batch_size_t)).to(device)
                offset = torch.cat(
                    [tags_t, new_in_batch],
                    dim=0
                )  # tags chosen at the following (later) step
                index = torch.ones(batch_size_t).long() * (step * tagset_size)
                index = index.to(device)
                index += offset.long()
            # BUGFIX: removed the leftover `import pdb; pdb.set_trace()`
            # debugging hook — a RuntimeError here should propagate, not drop
            # the process into an interactive debugger.
            tags_t = backpointer[:batch_size_t].gather(
                dim=1, index=index.unsqueeze(1).long())
            tags_t = tags_t.squeeze(1)
            tagids.append(tags_t.tolist())
        # tagids is a length L-1 list (the <end> step is dropped); reverse it,
        # pad ragged rows with <pad>, and transpose to [B, L-1]
        tagids = list(zip_longest(*reversed(tagids), fillvalue=pad))
        tagids = torch.Tensor(tagids).long()
        return tagids
class Metrics(object):
    """Evaluate a tagging model: per-tag precision, recall and F1 score."""

    def __init__(self, golden_tags, predict_tags, remove_O=False):
        # [[t1, t2], [t3, t4]...] --> [t1, t2, t3, t4...]
        self.golden_tags = self.flatten_lists(golden_tags)
        self.predict_tags = self.flatten_lists(predict_tags)

        if remove_O:  # drop 'O' tags: score entity tags only
            self._remove_Otags()

        # helper counts
        self.tagset = set(self.golden_tags)
        self.correct_tags_number = self.count_correct_tags()
        self.predict_tags_counter = Counter(self.predict_tags)
        self.golden_tags_counter = Counter(self.golden_tags)

        # precision
        self.precision_scores = self.cal_precision()
        # recall
        self.recall_scores = self.cal_recall()
        # F1
        self.f1_scores = self.cal_f1()

    def flatten_lists(self, lists):
        """Flatten one level of nesting; non-list items are kept as-is."""
        flatten_list = []
        for item in lists:
            if isinstance(item, list):  # idiom fix: was `type(l) == list`
                flatten_list += item
            else:
                flatten_list.append(item)
        return flatten_list

    def cal_precision(self):
        """precision = tp / predicted-count; 0 when the tag was never predicted."""
        precision_scores = {}
        for tag in self.tagset:
            try:
                precision_scores[tag] = self.correct_tags_number.get(tag, 0) / \
                    self.predict_tags_counter[tag]
            # BUGFIX: was a bare `except:` that swallowed everything; only
            # division by zero (tag never predicted) is expected here.
            except ZeroDivisionError:
                precision_scores[tag] = 0
        return precision_scores

    def cal_recall(self):
        """recall = tp / gold-count (every tag in tagset occurs in gold)."""
        recall_scores = {}
        for tag in self.tagset:
            recall_scores[tag] = self.correct_tags_number.get(tag, 0) / \
                self.golden_tags_counter[tag]
        return recall_scores

    def cal_f1(self):
        """Harmonic mean of precision and recall per tag."""
        f1_scores = {}
        for tag in self.tagset:
            p, r = self.precision_scores[tag], self.recall_scores[tag]
            f1_scores[tag] = 2*p*r / (p+r+1e-10)  # tiny epsilon avoids a zero denominator
        return f1_scores

    def report_scores(self):
        """Print the results as a table, like:

                      precision    recall  f1-score   support
              B-LOC      0.775     0.757     0.766      1084
              I-LOC      0.601     0.631     0.616       325
          avg/total      0.779     0.764     0.770      6178
        """
        # header row
        header_format = '{:>9s} {:>9} {:>9} {:>9} {:>9}'
        header = ['precision', 'recall', 'f1-score', 'support']
        print(header_format.format('', *header))

        row_format = '{:>9s} {:>9.4f} {:>9.4f} {:>9.4f} {:>9}'
        # one row per tag: precision, recall, f1, support
        for tag in self.tagset:
            print(row_format.format(
                tag,
                self.precision_scores[tag],
                self.recall_scores[tag],
                self.f1_scores[tag],
                self.golden_tags_counter[tag]
            ))

        # weighted averages
        avg_metrics = self._cal_weighted_average()
        print(row_format.format(
            'avg/total',
            avg_metrics['precision'],
            avg_metrics['recall'],
            avg_metrics['f1_score'],
            len(self.golden_tags)
        ))

    def count_correct_tags(self):
        """Count, per tag, how many predictions match gold (the tp terms)."""
        correct_dict = {}
        for gold_tag, predict_tag in zip(self.golden_tags, self.predict_tags):
            if gold_tag == predict_tag:
                if gold_tag not in correct_dict:
                    correct_dict[gold_tag] = 1
                else:
                    correct_dict[gold_tag] += 1
        return correct_dict

    def _cal_weighted_average(self):
        """Support-weighted average of precision/recall/F1 over all tags."""
        weighted_average = {}
        total = len(self.golden_tags)
        weighted_average['precision'] = 0.
        weighted_average['recall'] = 0.
        weighted_average['f1_score'] = 0.
        for tag in self.tagset:
            size = self.golden_tags_counter[tag]
            weighted_average['precision'] += self.precision_scores[tag] * size
            weighted_average['recall'] += self.recall_scores[tag] * size
            weighted_average['f1_score'] += self.f1_scores[tag] * size
        for metric in weighted_average.keys():
            weighted_average[metric] /= total
        return weighted_average

    def _remove_Otags(self):
        """Drop positions whose *gold* tag is 'O' from both sequences."""
        length = len(self.golden_tags)
        # PERF: use a set — `i not in list` made this O(n^2)
        O_tag_indices = {i for i in range(length)
                         if self.golden_tags[i] == 'O'}
        self.golden_tags = [tag for i, tag in enumerate(self.golden_tags)
                            if i not in O_tag_indices]
        self.predict_tags = [tag for i, tag in enumerate(self.predict_tags)
                             if i not in O_tag_indices]
        print("原总标记数为{},移除了{}个O标记,占比{:.2f}%".format(
            length,
            len(O_tag_indices),
            len(O_tag_indices) / length * 100
        ))

    def report_confusion_matrix(self):
        """Print the confusion matrix over the tagset."""
        print("\nConfusion Matrix:")
        tag_list = list(self.tagset)
        # matrix[i][j]: how often gold tag i was predicted as tag j
        tags_size = len(tag_list)
        matrix = []
        for i in range(tags_size):
            matrix.append([0] * tags_size)
        for golden_tag, predict_tag in zip(self.golden_tags, self.predict_tags):
            try:
                row = tag_list.index(golden_tag)
                col = tag_list.index(predict_tag)
                matrix[row][col] += 1
            except ValueError:  # a few tags appear only in predictions; skip them
                continue
        # print the matrix
        row_format_ = '{:>7} ' * (tags_size+1)
        print(row_format_.format("", *tag_list))
        for i, row in enumerate(matrix):
            print(row_format_.format(tag_list[i], *row))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (c) 2019, Gianluca Fiore
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
###############################################################################
__author__ = "Gianluca Fiore"
import sys
import os
# Requires PIL and imagehash modules
from PIL import Image
import imagehash
# Maximum perceptual-hash distance (exclusive) for two images to count as similar
CUTOFF = 20
# Directory scanned by main() — NOTE(review): machine-specific hard-coded path
exampleDir = '/mnt/documents/d/Camera_images/Krakow/Krakow2019'
def find_similar_images(paths, hashfunc=imagehash.average_hash):
    """Compare all images in a directory pairwise and return the filenames
    involved in near-matches.

    :param paths: directory to scan (non-recursive)
    :param hashfunc: perceptual hash function from the imagehash package
    :return: flat list [a, b, a, c, ...] of matched filename pairs; a pair is
        reported when 0 < hash distance < CUTOFF (distance 0, i.e. identical
        hashes, is deliberately skipped)
    """
    def is_image(filename):
        f = filename.lower()
        # endswith accepts a tuple of suffixes; the extra "'.jpg' in f" keeps
        # the original behavior of also matching names like "photo.jpg.bak"
        return f.endswith(('.png', '.jpg', '.jpeg')) or '.jpg' in f

    image_filenames = [os.path.join(paths, name)
                       for name in os.listdir(paths) if is_image(name)]

    images = {}
    for img in sorted(image_filenames):
        try:
            images[img] = hashfunc(Image.open(img))  # don't shadow builtin `hash`
        except Exception as e:
            print('Problem: ', e, ' with ', img)

    similar_images = []
    # BUGFIX: the original recovered the partner filename by scanning for the
    # *first* key whose hash equals the compared value, which reports the wrong
    # file when two images share a hash and costs an extra O(n) scan per
    # comparison. Iterating items() pairs each hash with its own filename.
    for name_a, hash_a in images.items():
        for name_b, hash_b in images.items():
            similarity = hash_a - hash_b
            # below the cutoff but above 0 (0 would mean identical hashes)
            if 0 < similarity < CUTOFF:
                print("Comparing ", name_a, " with ", name_b)
                print(similarity)
                similar_images.extend([name_a, name_b])
    return similar_images
def main():
    """Scan the example directory and print the set of similar images."""
    matches = find_similar_images(paths=exampleDir)
    # a set removes duplicate filenames from the flat match list
    print(set(matches))


if __name__ == '__main__':
    status = main()
    sys.exit(status)
|
import os
import logging
import sys
import numpy as np
import fitsio
import joblib
import tqdm
from ngmix.metacal import get_all_metacal
from ngmix.prepsfmom import PGaussMom
from wldeblend_sim import init_wldeblend, get_gal_wldeblend, make_ngmix_obs
from ngmix_maxlike import run_maxlike
LOGGER = logging.getLogger(__name__)
def _meas(gal, psf, redshift, nse, pixel_scale, aps, seed):
    """Render one galaxy, run metacalibration, and measure it with pre-PSF
    Gaussian moments (one measurement per aperture) plus a max-likelihood
    Gaussian fit.

    Returns a structured numpy array with one row per measurement per metacal
    step ("noshear"/"1p"/"1m"), or None if anything failed.
    """
    rng = np.random.RandomState(seed=seed)
    obs, true_flux, obs_nn = make_ngmix_obs(
        gal=gal, psf=psf, nse=nse, pixel_scale=pixel_scale, rng=rng,
    )
    try:
        mcal_res = get_all_metacal(
            obs,
            psf='fitgauss',
            fixnoise=True,
            rng=rng,
            types=["noshear", "1p", "1m"],
        )
        # any missing metacal image invalidates the whole object
        for k, v in mcal_res.items():
            if v is None:
                raise RuntimeError("bad mcal result!")

        # per-measurement output columns, built up as parallel lists
        s2ns = []
        g1s = []
        g1errs = []
        trs = []
        flags = []
        redshifts = []
        maps = []    # aperture used (-1 marks the max-like fit)
        msteps = []  # metacal step name
        kinds = []   # "pgauss" (moments) or "mgauss" (max-like fit)

        # pre-PSF Gaussian moments at each aperture
        for ap in aps:
            for step, mcal_obs in mcal_res.items():
                mom = PGaussMom(ap).go(mcal_obs)
                psf_mom = PGaussMom(ap).go(mcal_obs.psf, no_psf=True)
                if psf_mom["flags"] == 0:
                    psf_mom_t = psf_mom["T"]
                else:
                    psf_mom_t = np.nan
                flags.append(mom["flags"] | psf_mom["flags"])
                s2ns.append(mom["s2n"])
                g1s.append(mom["e1"])
                g1errs.append(mom["e_err"][0])
                trs.append(mom["T"]/psf_mom_t)
                redshifts.append(redshift)
                maps.append(ap)
                msteps.append(step)
                kinds.append("pgauss")

        # max-likelihood Gaussian fit (aperture column set to -1)
        for step, mcal_obs in mcal_res.items():
            mom = run_maxlike(mcal_obs, rng=rng)
            psf_mom = mcal_obs.psf.meta["result"]
            # map the fit outputs onto the same keys the moments code uses
            mom["e1"] = mom["g"][0]
            mom["e_err"] = mom["g_err"]
            if psf_mom["flags"] == 0:
                psf_mom_t = psf_mom["T"]
            else:
                psf_mom_t = np.nan
            flags.append(mom["flags"] | psf_mom["flags"])
            s2ns.append(mom["s2n"])
            g1s.append(mom["e1"])
            g1errs.append(mom["e_err"][0])
            trs.append(mom["T"]/psf_mom_t)
            redshifts.append(redshift)
            maps.append(-1)
            msteps.append(step)
            kinds.append("mgauss")

        # two passes: pass i == 0 builds the dtype, pass i == 1 fills the array
        for i in range(2):
            if i == 0:
                dtype = []
            else:
                md = np.zeros(len(flags), dtype=dtype)
            for cname, arr in [
                ("flags", flags),
                ("s2n", s2ns),
                ("e1", g1s),
                ("e1_err", g1errs),
                ("Tratio", trs),
                ("redshift", redshifts),
            ]:
                if i == 0:
                    dtype.append((cname, "f4"))
                else:
                    md[cname] = np.array(arr)
            if i == 0:
                dtype.append(("mdet_step", "U7"))
                dtype.append(("ap", "f4"))
                dtype.append(("kind", "U7"))
            else:
                md["mdet_step"] = msteps
                md["ap"] = maps
                md["kind"] = kinds
        return md
    except Exception as e:
        # deliberate best-effort: a failed object is logged and skipped upstream
        print("ERROR: " + repr(e), flush=True)
        return None
def main():
    """Run n_chunks x n_per_chunk simulated measurements in parallel and
    write the concatenated results (plus the aperture grid) to a FITS file."""
    n_per_chunk = 100
    # number of chunks comes from the command line, default 1
    n_chunks = int(sys.argv[1]) if len(sys.argv) > 1 else 1
    seed = np.random.randint(low=1, high=2**29)
    rng = np.random.RandomState(seed=seed)

    os.makedirs("./results_pgauss_mdet", exist_ok=True)

    wldeblend_data = init_wldeblend(survey_bands="lsst-r")

    # aperture grid passed to every measurement (units per PGaussMom — TODO confirm)
    aps = np.linspace(1.25, 2.75, 25)

    outputs = []
    with joblib.Parallel(n_jobs=-1, verbose=10, batch_size=2) as par:
        for chunk in tqdm.trange(n_chunks):
            jobs = []
            for i in range(n_per_chunk):
                gal, psf, redshift = get_gal_wldeblend(rng=rng, data=wldeblend_data)
                # each job gets an independent seed drawn from the master rng
                jobs.append(joblib.delayed(_meas)(
                    gal, psf, redshift, wldeblend_data.noise,
                    wldeblend_data.pixel_scale,
                    aps, rng.randint(low=1, high=2**29))
                )
            # drop failed measurements (None) from the results
            outputs.extend([_d for _d in par(jobs) if _d is not None])

    d = np.concatenate(outputs, axis=0)
    fitsio.write(
        "./results_pgauss_mdet/meas_seed%d.fits" % seed,
        d, extname="data", clobber=True)
    # second extension records the aperture grid alongside the data
    fitsio.write(
        "./results_pgauss_mdet/meas_seed%d.fits" % seed,
        aps, extname="aps")


if __name__ == "__main__":
    main()
|
from setuptools import setup

# NOTE(review): several fields below look like leftovers from the setuptools
# "funniest" tutorial project — the url and packages list do not match the
# 'gscrapper' name/description. Confirm before publishing:
#   - url points at storborg/funniest, presumably not this project's repo
#   - packages=['funniest'] presumably should be ['gscrapper'] — verify the
#     actual package directory name
#   - author/author_email are placeholder values
setup(name='gscrapper',
      version='0.1',
      description='Let you scrape images from google photo',
      url='http://github.com/storborg/funniest',
      author='Flying Circus',
      author_email='flyingcircus@example.com',
      license='MIT',
      packages=['funniest'],
      zip_safe=False)
|
#!/usr/bin/env python3
"""
Open source data downloader
Created: 10/15/2020 Max Bertolero (mb3152@seas.upenn.edu)
"""
__doc__ = """
This python library takes in a list of subjects, the data source,
and the data type you want, and downloads them. It then can delete them.
"""
import pkg_resources as pkgrf
import argparse
import configparser
import os
import re
import sys
import threading
from cryptography.fernet import Fernet
from datetime import datetime
from functools import partial
from getpass import getpass
from glob import glob
from multiprocessing.dummy import Pool
from pandas import read_csv
from subprocess import call, check_call
class RepeatTimer(threading.Timer):
    """A threading.Timer that fires repeatedly every `interval` seconds until
    stop()/cancel() is called (the stock Timer fires only once)."""

    def run(self):
        # wait() returns False on timeout (fire again) and True once the
        # `finished` event is set by cancel(), which ends the loop
        while not self.finished.wait(self.interval):
            self.function(*self.args, **self.kwargs)

    def stop(self):
        # alias for cancel(): sets the `finished` event
        self.cancel()
# User's home directory; config and AWS credentials live under it
HOME = os.path.expanduser("~")
# Default location of the NDA credentials config file
NDA_CREDENTIALS = os.path.join(HOME, ".data_dl", "config.ini")
# Directory containing the running script
HERE = os.path.dirname(os.path.abspath(sys.argv[0]))
# Packaged helper that exchanges NDA credentials for temporary AWS tokens
NDA_AWS_TOKEN_MAKER = pkgrf.resource_filename('data_dl', 'data/nda_aws_token_maker.py')
def download_abcd(subjects,where,log,data,cores=1):
    """
    Download ABCD data for the given subjects from the NDA S3 buckets.

    subjects: subject list, strings in a list (or the string 'all')
    where: where should we put your data?
    log: where should I keep the log? we use this to show what downloaded, and use it to delete things
    data: which subset to pull: 'dwi', 'anat', 'jsons', or 'all'
    cores: how many cores?
    """
    date_stamp = "{:%Y:%m:%d %H:%M}".format(datetime.now())
    print('Data downloader called at %s with:' % date_stamp)
    make_nda_token(NDA_CREDENTIALS)
    # start an hourly thread ( 60 * 60 = 3600 seconds) to update the NDA download token
    t = RepeatTimer(3600, make_nda_token, [NDA_CREDENTIALS])
    t.start()
    s3_file = pkgrf.resource_filename('data_dl', 'data/abcd_datastructure_manifest.txt')
    if os.path.exists(s3_file) == False:
        print ('downloading a big file (1.7GB) you need, hang tight')
        # NOTE(review): hard-coded Dropbox mirror of the manifest — confirm it is still live
        os.system('wget https://www.dropbox.com/s/nzc87lnowohud0m/datastructure_manifest.txt?dl=0 -O %s'%(s3_file))
    # pick the data-subset basename list matching the requested data type
    if data == 'dwi': basenames_file = pkgrf.resource_filename('data_dl', 'data/abcd_data_subsets_dwi.txt')
    if data == 'all': basenames_file = pkgrf.resource_filename('data_dl', 'data/abcd_data_subsets.txt')
    if data == 'anat': basenames_file = pkgrf.resource_filename('data_dl', 'data/abcd_data_subsets_anat.txt')
    if data == 'jsons': basenames_file = pkgrf.resource_filename('data_dl', 'data/abcd_data_subsets.txt')
    manifest_df = read_csv(s3_file, sep='\t',low_memory=False)
    subject_list = get_subject_list(manifest_df, subjects)
    print('\tData Subsets:\t%s' % basenames_file)
    manifest_names = generate_manifest_list(basenames_file, subject_list)
    print('\nReading in S3 links...')
    # keep only the manifest rows for the requested subjects/subsets
    s3_links_arr = manifest_df[manifest_df['manifest_name'].isin(manifest_names)]['associated_file'].values
    if data == 'jsons':
        bad = download_s3_jsons(s3_links_arr, where, log, cores)
    else:
        bad = download_s3_files(s3_links_arr, where, log, cores)
    print('\nProblematic commands:')
    for baddy in bad:
        print(baddy)
    # stop the hourly token-refresh thread so the process can exit
    t.stop()
def delete(where,log):
    """Delete every file listed in <log>/successful_downloads.txt from under
    `where`, then remove the log itself."""
    df = read_csv('%s/successful_downloads.txt'%(log),skiprows=1,header=None)
    for f in df.iterrows():
        f = f[1][0]
        # strip the s3://bucket/prefix components; keep the relative path
        f = '/'.join(f.split('/')[4:])
        # NOTE(review): the leading '/' assumes `where` is passed without one — confirm
        f = '/%s/%s'%(where,f)
        os.system('rm -f %s'%(f))
    os.system('rm -f /%s/successful_downloads.txt'%(log))
def delete_scary(where, log=None):
    """Recursively delete the whole download directory `where`.

    :param where: directory to remove (rm -f -r).
    :param log: optional log directory whose successful_downloads.txt should
        also be removed.

    BUGFIX: the original referenced an undefined name ``log`` (it was not a
    parameter and no global exists), so the second command always raised
    NameError. ``log`` is now an optional parameter; the log file is only
    removed when one is given, which keeps the old call signature working.
    """
    os.system('rm -f -r %s'%(where))
    if log is not None:
        os.system('rm /%s/successful_downloads.txt'%(log))
def get_subject_list(manifest_df, subjects):
    """
    If a list of subjects is provided then use that, else collect all unique
    subject ids from the s3 spreadsheet and use that instead.

    :param manifest_df: pandas dataframe created from the s3 manifest
    :param subjects: list of subject ids, or the string 'all'
    :return: list of subject ids
    """
    if subjects != 'all':
        return subjects
    # Otherwise get all subjects from the S3 spreadsheet.
    # (The original re-tested `subjects == 'all'` here, which is always true
    # after the early return above — the redundant branch is removed.)
    print('\tSubjects:\tAll subjects')
    subject_list = set()
    for manifest_name in manifest_df['manifest_name'].values:
        # manifest names look like <subject>.<basename>.manifest.json
        subject_list.add(manifest_name.split('.')[0])
    return list(subject_list)
def generate_manifest_list(basenames_file, subject_list):
    """
    Concatenate each subject with each basename as
    ``{SUBJECT}.{BASENAME}.manifest.json`` to match the manifest names in the
    s3 spreadsheet.

    :param basenames_file: path to a text file with one basename per line
    :param subject_list: list of subject ids
    :return: list of manifest names (all basenames for subject 1, then
        subject 2, ...)

    BUGFIX: the file handle returned by ``open(basenames_file)`` was never
    closed; a ``with`` block now guarantees it is.
    """
    with open(basenames_file) as handle:
        basenames = [line.rstrip('\n') for line in handle]
    return [sub + '.' + base + '.manifest.json'
            for sub in subject_list
            for base in basenames]
def download_s3_jsons(s3_links_arr, output_dir, log_dir, pool_size=1):
    """
    Download only the .json sidecar files from the given S3 links via
    ``aws s3 cp``, logging successes and failures.

    NOTE(review): this is a near-duplicate of download_s3_files() (the only
    difference is the ``.endswith('.json')`` filter) — consider factoring the
    shared logic into one helper.

    :param s3_links_arr: iterable of S3 object paths from the manifest
    :param output_dir: destination root for downloaded files
    :param log_dir: directory holding successful_downloads.txt / failed_downloads.txt
    :param pool_size: number of concurrent `aws s3 cp` commands
    :return: list of failed S3 paths and commands
    """
    bad_download = []
    commands = []
    success_log = os.path.join(log_dir, 'successful_downloads.txt')
    failed_log = os.path.join(log_dir, 'failed_downloads.txt')
    # study-level files that exist once per dataset, not once per subject —
    # download each of these at most once
    only_one_needed = [
        "CHANGES",
        "dataset_description.json",
        "README",
        "task-MID_bold.json",
        "task-nback_bold.json",
        "task-rest_bold.json",
        "task-SST_bold.json",
        "Gordon2014FreeSurferSubcortical_dparc.dlabel.nii",
        "HCP2016FreeSurferSubcortical_dparc.dlabel.nii",
        "Markov2012FreeSurferSubcortical_dparc.dlabel.nii",
        "Power2011FreeSurferSubcortical_dparc.dlabel.nii",
        "Yeo2011FreeSurferSubcortical_dparc.dlabel.nii"
    ]
    # (seen-count, basename) pairs; the count flips to 1 after the first hit
    only_one_tuple = list(zip([0]*len(only_one_needed), only_one_needed))
    if os.path.isfile(success_log):
        with open(success_log) as f:
            success_set = set(f.readlines())
    else:
        success_set = set()
    download_set = set()
    print('Creating unique download list...')
    for s3_link in s3_links_arr:
        if s3_link.endswith('.json'):
            # manifest paths may lack the scheme prefix; normalize to s3://
            if s3_link[:4] != 's3:/':
                s3_path = 's3:/' + s3_link
            else:
                s3_path = s3_link
            dest = os.path.join(output_dir, '/'.join(s3_path.split('/')[4:]))
            skip = False
            for i, only_one_pair in enumerate(only_one_tuple):
                only_one_count = only_one_pair[0]
                only_one = only_one_pair[1]
                if only_one in s3_path:
                    if only_one_count == 0:
                        only_one_tuple[i] = (1, only_one)
                    else:
                        skip = True
                        break
            if not skip and s3_path not in success_set:
                # Check if the filename already in the success log
                dest = os.path.join(output_dir, '/'.join(s3_path.split('/')[4:]))
                if not os.path.isfile(dest):
                    download_set.add( (s3_path, dest) )
    # make unique s3 downloads
    print('Creating download commands...')
    for s3_path, dest in sorted(download_set, key=lambda x: x[1]):
        commands.append( ' ; '.join( [
            "mkdir -p " + os.path.dirname(dest),
            "aws s3 cp " + s3_path + " " + dest + " --profile NDA"
        ] )
        )
    if pool_size == 1:
        print('\nDownloading files serially...')
    elif pool_size > 1:
        print('\nParallel downloading with %d core(s)...' % pool_size)
    elif pool_size < 1:
        print('\nCannot download with less than 1 core. Try changing your "-p" argument. Quitting...')
        sys.exit()
    pool = Pool(pool_size) # pool_size concurrent commands at a time
    for i, returncode in enumerate(pool.imap(partial(call, shell=True), commands)):
        # recover the s3 path from the shell command string for logging
        s3_path = re.search('.+aws\ s3\ cp\ (s3://.+)\ ' + output_dir + '.+', commands[i]).group(1)
        if returncode == 0:
            with open(success_log, 'a+') as s:
                s.write(s3_path + '\n')
        else:
            print( "Command failed: {}".format(commands[i]) )
            bad_download.append(s3_path)
            with open(failed_log, 'a+') as f:
                f.write(s3_path + '\n')
            bad_download.append(commands[i])
    pool.close()
    return bad_download
def download_s3_files(s3_links_arr, output_dir, log_dir, pool_size=1):
    """
    Download every file in the given S3 links via ``aws s3 cp``, logging
    successes and failures.

    NOTE(review): near-duplicate of download_s3_jsons() (which adds only a
    ``.json`` filter) — consider factoring out the shared logic.

    :param s3_links_arr: iterable of S3 object paths from the manifest
    :param output_dir: destination root for downloaded files
    :param log_dir: directory holding successful_downloads.txt / failed_downloads.txt
    :param pool_size: number of concurrent `aws s3 cp` commands
    :return: list of failed S3 paths and commands
    """
    bad_download = []
    commands = []
    success_log = os.path.join(log_dir, 'successful_downloads.txt')
    failed_log = os.path.join(log_dir, 'failed_downloads.txt')
    # study-level files that exist once per dataset, not once per subject —
    # download each of these at most once
    only_one_needed = [
        "CHANGES",
        "dataset_description.json",
        "README",
        "task-MID_bold.json",
        "task-nback_bold.json",
        "task-rest_bold.json",
        "task-SST_bold.json",
        "Gordon2014FreeSurferSubcortical_dparc.dlabel.nii",
        "HCP2016FreeSurferSubcortical_dparc.dlabel.nii",
        "Markov2012FreeSurferSubcortical_dparc.dlabel.nii",
        "Power2011FreeSurferSubcortical_dparc.dlabel.nii",
        "Yeo2011FreeSurferSubcortical_dparc.dlabel.nii"
    ]
    # (seen-count, basename) pairs; the count flips to 1 after the first hit
    only_one_tuple = list(zip([0]*len(only_one_needed), only_one_needed))
    if os.path.isfile(success_log):
        with open(success_log) as f:
            success_set = set(f.readlines())
    else:
        success_set = set()
    download_set = set()
    print('Creating unique download list...')
    for s3_link in s3_links_arr:
        # manifest paths may lack the scheme prefix; normalize to s3://
        if s3_link[:4] != 's3:/':
            s3_path = 's3:/' + s3_link
        else:
            s3_path = s3_link
        dest = os.path.join(output_dir, '/'.join(s3_path.split('/')[4:]))
        skip = False
        for i, only_one_pair in enumerate(only_one_tuple):
            only_one_count = only_one_pair[0]
            only_one = only_one_pair[1]
            if only_one in s3_path:
                if only_one_count == 0:
                    only_one_tuple[i] = (1, only_one)
                else:
                    skip = True
                    break
        if not skip and s3_path not in success_set:
            # Check if the filename already in the success log
            dest = os.path.join(output_dir, '/'.join(s3_path.split('/')[4:]))
            if not os.path.isfile(dest):
                download_set.add( (s3_path, dest) )
    # make unique s3 downloads
    print('Creating download commands...')
    for s3_path, dest in sorted(download_set, key=lambda x: x[1]):
        commands.append( ' ; '.join( [
            "mkdir -p " + os.path.dirname(dest),
            "aws s3 cp " + s3_path + " " + dest + " --profile NDA"
        ] )
        )
    if pool_size == 1:
        print('\nDownloading files serially...')
    elif pool_size > 1:
        print('\nParallel downloading with %d core(s)...' % pool_size)
    elif pool_size < 1:
        print('\nCannot download with less than 1 core. Try changing your "-p" argument. Quitting...')
        sys.exit()
    pool = Pool(pool_size) # pool_size concurrent commands at a time
    for i, returncode in enumerate(pool.imap(partial(call, shell=True), commands)):
        # recover the s3 path from the shell command string for logging
        s3_path = re.search('.+aws\ s3\ cp\ (s3://.+)\ ' + output_dir + '.+', commands[i]).group(1)
        if returncode == 0:
            with open(success_log, 'a+') as s:
                s.write(s3_path + '\n')
        else:
            print( "Command failed: {}".format(commands[i]) )
            bad_download.append(s3_path)
            with open(failed_log, 'a+') as f:
                f.write(s3_path + '\n')
            bad_download.append(commands[i])
    pool.close()
    return bad_download
def make_nda_token(credentials):
    """
    Create an NDA token by getting credentials from the config file. If no
    config file exists yet, prompt the user for NDA credentials and create
    one to store them.

    :param credentials: path to the config file holding NDA credentials.
    :return: N/A (exits the process if token creation fails)
    """
    # If config file with NDA credentials exists, then get credentials from it,
    # unless user entered other credentials to make a new config file
    # First make sure ~/.aws directory exists
    os.makedirs(os.path.join(HOME, '.aws'), exist_ok=True)
    if os.path.exists(credentials):
        username, password = get_nda_credentials_from(credentials)
    # Otherwise get NDA credentials from user & save them in a new config file,
    # overwriting the existing config file if user gave credentials as cli args
    else:
        # prompt for the NDA username
        username = input("\nEnter your NIMH Data Archives username: ")
        # prompt for the NDA password (no echo)
        password = getpass("Enter your NIMH Data Archives password: ")
        make_config_file(credentials, username, password)
    # Try to make NDA token
    # NOTE(review): the password is passed as a command-line argument, which
    # is visible in the process list while the token maker runs — consider
    # passing it via stdin or the environment instead.
    token_call_exit_code = call((
        "python3",
        NDA_AWS_TOKEN_MAKER,
        username,
        password
    ))
    # If NDA credentials are invalid, tell user so without printing password.
    # Manually catch error instead of using try-except to avoid trying to
    # catch another file's exception.
    if token_call_exit_code != 0:
        print("Failed to create NDA token using the username and decrypted "
              "password from {}.".format(os.path.abspath(credentials)))
        sys.exit(1)
def get_nda_credentials_from(config_file_path):
    """
    Given the path to a config file, return the user's NDA credentials.

    :param config_file_path: Path to a file containing the NDA username, the
        Fernet-encrypted NDA password, and the key for that encryption.
    :return: Two values: the NDA username and the decrypted password.
    """
    # Parse the config file holding the NDA credentials
    parser = configparser.ConfigParser()
    parser.read(config_file_path)
    nda_section = parser["NDA"]

    # Decrypt the stored password with the stored key
    key_bytes = nda_section["key"].encode("UTF-8")
    secret_bytes = nda_section["encrypted_password"].encode("UTF-8")
    password = (
        Fernet(key_bytes)
        .decrypt(token=secret_bytes)
        .decode("UTF-8")
    )
    return nda_section["username"], password
def make_config_file(config_filepath, username, password):
    """
    Create a config file to save the user's NDA credentials.

    :param config_filepath: Name and path of config file to create.
    :param username: User's NDA username to save in config file.
    :param password: User's NDA password to encrypt then save in config file.
    :return: N/A
    """
    # Object to read/write config file containing NDA credentials
    config = configparser.ConfigParser()
    # Encrypt user's NDA password by making an encryption key
    encryption_key = Fernet.generate_key()
    encrypted_password = (
        Fernet(encryption_key).encrypt(password.encode("UTF-8"))
    )
    # Save the encryption key and encrypted password to a new config file
    config["NDA"] = {
        "username": username,
        "encrypted_password": encrypted_password.decode("UTF-8"),
        "key": encryption_key.decode("UTF-8")
    }
    # BUGFIX: exist_ok=True replaces the check-then-create (TOCTOU) race of
    # the previous `if not os.path.exists(...)` guard; the truthiness check
    # also handles a bare filename (empty dirname), which previously crashed.
    parent_dir = os.path.dirname(config_filepath)
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)
    with open(config_filepath, "w") as configfile:
        config.write(configfile)
    # Restrict the file to its owner so other users cannot read the key
    check_call( ("chmod", "700", config_filepath) )
|
import os
import tensorflow as tf
from modules.models import RRDB_Model, DiscriminatorVGG128, mrUNet
from modules.lr_scheduler import MultiStepLR
from modules.losses import (PixelLoss, ContentLoss, DiscriminatorLoss,
GeneratorLoss)
from modules.utils import (load_yaml, load_dataset, ProgressBar,
set_memory_growth)
def main():
    """Train ESRGAN: an RRDB generator against a VGG-style discriminator with
    pixel, perceptual-feature and relativistic-GAN losses; checkpoints and
    TensorBoard summaries are written periodically."""
    # define network
    input_size = 256
    channels = 3
    batch = 16
    niter = 400000  # total training iterations
    generator = RRDB_Model(input_size, channels)
    generator.summary(line_length=80)
    discriminator = DiscriminatorVGG128(input_size, channels)
    discriminator.summary(line_length=80)
    # load dataset
    dataset_path = 'PATH' # TODO
    # NOTE(review): shuffle=False is unusual for a training set — confirm the
    # data is pre-shuffled upstream.
    train_dataset = load_dataset(
        dataset_path, input_size, batch, shuffle=False)
    # define optimizer (learning rate halves at each milestone step)
    learning_rate_G = MultiStepLR(
        1e-4, [50000, 100000, 200000, 300000], 0.5)
    learning_rate_D = MultiStepLR(
        1e-4, [50000, 100000, 200000, 300000], 0.5)
    optimizer_G = tf.keras.optimizers.Adam(learning_rate=learning_rate_G,
                                           beta_1=0.9,
                                           beta_2=0.99)
    optimizer_D = tf.keras.optimizers.Adam(learning_rate=learning_rate_D,
                                           beta_1=0.9,
                                           beta_2=0.99)
    # define losses function
    pixel_loss_fn = PixelLoss(criterion='l1')
    fea_loss_fn = ContentLoss(criterion='l1')
    gen_loss_fn = GeneratorLoss(gan_type='ragan')
    dis_loss_fn = DiscriminatorLoss(gan_type='ragan')
    # load checkpoint (resumes step counter, optimizers and both networks)
    checkpoint_dir = './checkpoints/esrgan'
    checkpoint = tf.train.Checkpoint(step=tf.Variable(0, name='step'),
                                     optimizer_G=optimizer_G,
                                     optimizer_D=optimizer_D,
                                     model=generator,
                                     discriminator=discriminator)
    manager = tf.train.CheckpointManager(checkpoint=checkpoint,
                                         directory=checkpoint_dir,
                                         max_to_keep=3)
    if manager.latest_checkpoint:
        checkpoint.restore(manager.latest_checkpoint)
        print('[*] load ckpt from {} at step {}.'.format(
            manager.latest_checkpoint, checkpoint.step.numpy()))
    else:
        print("[*] training from scratch.")
    # define training step function
    @tf.function
    def train_step(lr, hr):
        # persistent tape: gradients are taken twice (generator + discriminator)
        with tf.GradientTape(persistent=True) as tape:
            sr = generator(lr, training=True)
            hr_output = discriminator(hr, training=True)
            sr_output = discriminator(sr, training=True)
            losses_G = {}
            losses_D = {}
            # regularization terms collected from the models themselves
            losses_G['reg'] = tf.reduce_sum(generator.losses)
            losses_D['reg'] = tf.reduce_sum(discriminator.losses)
            # weighted ESRGAN loss mix: pixel, perceptual feature, adversarial
            losses_G['pixel'] = 1e-2 * pixel_loss_fn(hr, sr)
            losses_G['feature'] = 1.0 * fea_loss_fn(hr, sr)
            losses_G['gan'] = 5e-3 * gen_loss_fn(hr_output, sr_output)
            losses_D['gan'] = dis_loss_fn(hr_output, sr_output)
            total_loss_G = tf.add_n([l for l in losses_G.values()])
            total_loss_D = tf.add_n([l for l in losses_D.values()])
        grads_G = tape.gradient(
            total_loss_G, generator.trainable_variables)
        grads_D = tape.gradient(
            total_loss_D, discriminator.trainable_variables)
        optimizer_G.apply_gradients(
            zip(grads_G, generator.trainable_variables))
        optimizer_D.apply_gradients(
            zip(grads_D, discriminator.trainable_variables))
        return total_loss_G, total_loss_D, losses_G, losses_D
    # training loop
    summary_writer = tf.summary.create_file_writer('./logs/esrgan')
    prog_bar = ProgressBar(niter, checkpoint.step.numpy())
    # continue from the restored step; never take a negative number of batches
    remain_steps = max(niter - checkpoint.step.numpy(), 0)
    for lr, hr in train_dataset.take(remain_steps):
        checkpoint.step.assign_add(1)
        steps = checkpoint.step.numpy()
        total_loss_G, total_loss_D, losses_G, losses_D = train_step(lr, hr)
        prog_bar.update(
            "loss_G={:.4f}, loss_D={:.4f}, lr_G={:.1e}, lr_D={:.1e}".format(
                total_loss_G.numpy(), total_loss_D.numpy(),
                optimizer_G.lr(steps).numpy(), optimizer_D.lr(steps).numpy()))
        # scalar summaries every 10 steps
        if steps % 10 == 0:
            with summary_writer.as_default():
                tf.summary.scalar(
                    'loss_G/total_loss', total_loss_G, step=steps)
                tf.summary.scalar(
                    'loss_D/total_loss', total_loss_D, step=steps)
                for k, l in losses_G.items():
                    tf.summary.scalar('loss_G/{}'.format(k), l, step=steps)
                for k, l in losses_D.items():
                    tf.summary.scalar('loss_D/{}'.format(k), l, step=steps)
                tf.summary.scalar(
                    'learning_rate_G', optimizer_G.lr(steps), step=steps)
                tf.summary.scalar(
                    'learning_rate_D', optimizer_D.lr(steps), step=steps)
        # checkpoint every 5000 steps
        if steps % 5000 == 0:
            manager.save()
            print("\n[*] save ckpt file at {}".format(
                manager.latest_checkpoint))
    print("\n [*] training done!")


if __name__ == '__main__':
    main()
|
from typing import Dict, List, Any
import numpy
from overrides import overrides
from ..instance import TextInstance, IndexedInstance
from ...data_indexer import DataIndexer
class TaggingInstance(TextInstance):
    """
    A ``TaggingInstance`` represents a passage of text and a tag sequence over that text.

    There are some sticky issues with tokenization and how exactly the label is specified. For
    example, if your label is a sequence of tags, that assumes a particular tokenization, which
    interacts in a funny way with our tokenization code. This is a general superclass containing
    common functionality for most simple sequence tagging tasks. The specifics of reading in data
    from a file and converting that data into properly-indexed tag sequences is left to subclasses.
    """
    def __init__(self, text: str, label: Any, index: int=None):
        super(TaggingInstance, self).__init__(label, index)
        self.text = text

    def __str__(self):
        # BUGFIX: previously printed "TaggedSequenceInstance", which is not
        # this class's name and made debugging output misleading.
        return "TaggingInstance(" + self.text + ", " + str(self.label) + ")"

    @overrides
    def words(self) -> Dict[str, List[str]]:
        words = self._words_from_text(self.text)
        words['tags'] = self.tags_in_label()
        return words

    def tags_in_label(self):
        """
        Returns all of the tag words in this instance, so that we can convert them into indices.
        This is called in ``self.words()``.  Not necessary if you have some pre-indexed labeling
        scheme.
        """
        raise NotImplementedError

    def _index_label(self, label: Any, data_indexer: DataIndexer) -> List[int]:
        """
        Index the labels. Since we don't know what form the label takes, we leave it to subclasses
        to implement this method. If you need to convert tag names into indices, use the namespace
        'tags' in the ``DataIndexer``.
        """
        raise NotImplementedError

    def to_indexed_instance(self, data_indexer: DataIndexer):
        """Convert text and label into index sequences of equal length."""
        text_indices = self._index_text(self.text, data_indexer)
        label_indices = self._index_label(self.label, data_indexer)
        assert len(text_indices) == len(label_indices), "Tokenization is off somehow"
        return IndexedTaggingInstance(text_indices, label_indices, self.index)
class IndexedTaggingInstance(IndexedInstance):
    """
    Indexed counterpart of ``TaggingInstance``: a list of word indices paired with a
    per-token list of label indices.
    """
    def __init__(self, text_indices: List[int], label: List[int], index: int=None):
        super(IndexedTaggingInstance, self).__init__(label, index)
        self.text_indices = text_indices

    @classmethod
    @overrides
    def empty_instance(cls):
        # Bug fix: this previously returned a (non-indexed) ``TaggingInstance``, which does
        # not match what callers of ``IndexedInstance.empty_instance()`` expect.  Using
        # ``cls`` also keeps subclasses correct.
        return cls([], label=None, index=None)

    @overrides
    def get_lengths(self) -> Dict[str, int]:
        """Delegate to the superclass helper, keyed on the text token indices."""
        return self._get_word_sequence_lengths(self.text_indices)

    @overrides
    def pad(self, max_lengths: Dict[str, int]):
        """
        Pad (or truncate) both the text indices and the label sequence to
        ``max_lengths['num_sentence_words']``, truncating/padding on the left.
        """
        self.text_indices = self.pad_word_sequence(self.text_indices, max_lengths,
                                                   truncate_from_right=False)
        # NOTE(review): default_value assumes self.label is non-empty — confirm callers
        # never pad an instance with an empty label sequence.
        self.label = self.pad_sequence_to_length(self.label,
                                                 desired_length=max_lengths['num_sentence_words'],
                                                 default_value=lambda: self.label[0],
                                                 truncate_from_right=False)

    @overrides
    def as_training_data(self):
        """Return (text array, label array) as int32 numpy arrays."""
        text_array = numpy.asarray(self.text_indices, dtype='int32')
        label_array = numpy.asarray(self.label, dtype='int32')
        return text_array, label_array
|
import numpy as np
def gini(array):
    """Calculate the Gini coefficient of a numpy array.

    Accepts any array-like of non-negative (or shifted-to-non-negative) values and
    returns a float in [0, 1): 0 for perfect equality, approaching 1 for maximal
    inequality.  The input is never modified.
    """
    # based on bottom eq:
    # http://www.statsdirect.com/help/generatedimages/equations/equation154.svg
    # from:
    # http://www.statsdirect.com/help/default.htm#nonparametric_methods/gini.htm
    # All values are treated equally, arrays must be 1d.
    # Bug fix: work on a flattened *float* copy — the in-place ``+= 0.0000001`` below
    # raises (or silently floors) on integer-dtype input, and we must not mutate the
    # caller's data.
    array = numpy.asarray(array, dtype=float).flatten()
    if numpy.amin(array) < 0:
        # Values cannot be negative:
        array -= numpy.amin(array)
    # Values cannot be 0:
    array += 0.0000001
    # Values must be sorted:
    array = numpy.sort(array)
    # Index per array element:
    index = numpy.arange(1, array.shape[0] + 1)
    # Number of array elements:
    n = array.shape[0]
    # Gini coefficient:
    return ((numpy.sum((2 * index - n - 1) * array)) / (n * numpy.sum(array)))
|
import pygame
import consoleHandling
import tilemap
from player import Player
class Game:
    """Top-level game state: loads and slices the spritesheet, owns the player
    and the camera, and applies per-frame input/camera updates."""
    def __init__(self):
        """
        State 0: Menu
        State 1: Game
        State 2: Pause
        """
        self.state = 1
        # Load spritesheet
        consoleHandling.printToGameConsole("Loading sprites.")
        try:
            self.spritesheet = pygame.image.load("spritesheet.png").convert_alpha()
        except pygame.error:
            # NOTE(review): on load failure self.spritesheet is never assigned, so the
            # blits below will raise AttributeError — confirm whether this should abort.
            consoleHandling.printToGameConsole("Error loading sprites.")
        # Cut spritesheet into named frame lists.
        self.sprites = {}
        def appendSprite(spriteName, rects):
            # Copy each rect out of the sheet onto its own transparent surface and
            # register the resulting frame list under spriteName.
            sprite = []
            for r in rects:
                image = pygame.Surface(pygame.Rect(r).size, pygame.SRCALPHA).convert_alpha()
                image.blit(self.spritesheet, (0, 0), pygame.Rect(r))
                sprite.append(image)
            self.sprites[spriteName] = sprite
        # Tiles.
        tiles = (
            (0, 0, 32, 32), # Standard tile.
        )
        appendSprite("tiles", tiles)
        # Player passive.
        playerPassive = (
            (0, 32, 32, 32),
            (0, 0, 0, 0) # To remove error.
        )
        appendSprite("playerPassive", playerPassive)
        # Camera top-left corner in world coordinates.
        self.cameraX = 0
        self.cameraY = 0
        # Player
        self.player = Player()
    def updateGame(self, pg, pressed):
        """Advance one frame: apply WASD movement (5 px per held key), then clamp
        the camera so it never shows anything outside the tilemap.

        pg       -- the pygame module (source of the K_* key constants)
        pressed  -- key-state sequence from pg.key.get_pressed()
        """
        # Update input.
        if pressed[pg.K_w]:
            self.player.y -= 5
        if pressed[pg.K_s]:
            self.player.y += 5
        if pressed[pg.K_a]:
            self.player.x -= 5
        if pressed[pg.K_d]:
            self.player.x += 5
        # Update camera. Camera will never view things out of the screen, so it will only be within the tilemap.
        # X value: clamp to [0, map width - view width], else center on the player.
        if (self.player.x + (self.player.width / 2)) - (tilemap.gameWidth / 2) < 0:
            self.cameraX = 0
        elif (self.player.x + (self.player.width / 2)) + (tilemap.gameWidth / 2) > tilemap.columns * tilemap.tileWidth:
            self.cameraX = tilemap.columns * tilemap.tileWidth - tilemap.gameWidth
        else:
            self.cameraX = self.player.x + (self.player.width / 2) - (tilemap.gameWidth / 2)
        # Y value: same clamping as X, using map height.
        if (self.player.y + (self.player.height / 2)) - (tilemap.gameHeight / 2) < 0:
            self.cameraY = 0
        elif (self.player.y + (self.player.height / 2)) + (tilemap.gameHeight / 2) > tilemap.rows * tilemap.tileHeight:
            self.cameraY = tilemap.rows * tilemap.tileHeight - tilemap.gameHeight
        else:
            self.cameraY = self.player.y + (self.player.height / 2) - (tilemap.gameHeight / 2)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" timeseriesframeinterface.py
Description:
"""
# Package Header #
from ..__header__ import *
# Header #
__author__ = __author__
__credits__ = __credits__
__maintainer__ = __maintainer__
__email__ = __email__
# Imports #
# Standard Libraries #
from abc import abstractmethod
# Third-Party Packages #
# Local Packages #
from ..dataframeinterface import DataFrameInterface
# Definitions #
# Classes #
class TimeSeriesFrameInterface(DataFrameInterface):
    """Abstract interface for a data frame whose samples are ordered in time.

    Subclasses must implement the abstract getters/finders below; the concrete
    ``get_*_range_time`` helpers are implemented here in terms of
    ``find_time_sample``.
    """
    # Magic Methods #
    # Construction/Destruction
    # def __init__(self, data=None, times=True, init=True):
    #     super().__init__()
    #     self.axis = 0
    #     self.sample_rate = 0
    #
    #     self.data = None
    #     self.times = None
    #
    #     if init:
    #         self.construct(data=data, times=times)
    # Instance Methods #
    # Constructors/Destructors
    # def construct(self, data=None, times=None):
    #     if data is not None:
    #         self.data = data
    #
    #     if times is not None:
    #         self.times = times
    # Getters
    @abstractmethod
    def get_length(self):
        """Return the length of this frame."""
        pass
    @abstractmethod
    def get_item(self, item):
        """Return the element(s) selected by ``item``."""
        pass
    @abstractmethod
    def get_time_axis(self):
        """Return the time axis of this frame."""
        pass
    # Data
    @abstractmethod
    def get_range(self, start=None, stop=None, step=None):
        """Return the data between the given sample indices."""
        pass
    @abstractmethod
    def get_time(self, super_index):
        """Return the time of a single sample."""
        pass  # return self.time[super_index]
    @abstractmethod
    def get_times(self, start=None, stop=None, step=None):
        """Return the times between the given sample indices."""
        pass  # return self.times[slice(start, stop, step)]
    # Find
    @abstractmethod
    def find_time_index(self, timestamp, aprox=False, tails=False):
        """Find the index matching ``timestamp`` (approximately if ``aprox``)."""
        pass
    @abstractmethod
    def find_time_sample(self, timestamp, aprox=False, tails=False):
        """Find the sample number matching ``timestamp``; implementations are
        expected to return ``(sample, true_timestamp)``."""
        pass
    # Get with Time
    def get_timestamp_range_time(self, start=None, stop=None, aprox=False, tails=False):
        """Return ``(times, true_start, true_end)`` for the span between two timestamps,
        resolving the timestamps to samples via ``find_time_sample``."""
        start_sample, true_start = self.find_time_sample(timestamp=start, aprox=aprox, tails=tails)
        end_sample, true_end = self.find_time_sample(timestamp=stop, aprox=aprox, tails=tails)
        return self.get_times(start_sample, end_sample), true_start, true_end
    def get_data_range_time(self, start=None, stop=None, aprox=False, tails=False):
        """Return ``(data, true_start, true_end)`` for the span between two timestamps,
        resolving the timestamps to samples via ``find_time_sample``."""
        start_sample, true_start = self.find_time_sample(timestamp=start, aprox=aprox, tails=tails)
        end_sample, true_end = self.find_time_sample(timestamp=stop, aprox=aprox, tails=tails)
        return self.get_range(start_sample, end_sample), true_start, true_end
    # Shape
    @abstractmethod
    def validate_shape(self):
        """Check that this frame's shape is internally consistent."""
        pass
    @abstractmethod
    def change_size(self, shape=None, **kwargs):
        """Resize this frame to ``shape``."""
        pass
    # Sample Rate
    @abstractmethod
    def validate_sample_rate(self):
        """Check that the sample rate is consistent across the frame."""
        pass
    @abstractmethod
    def resample(self, sample_rate, **kwargs):
        """Resample the frame to a new sample rate."""
        pass
    # Continuous Data
    @abstractmethod
    def validate_continuous(self):
        """Check that the data contains no temporal gaps."""
        pass
    @abstractmethod
    def make_continuous(self):
        """Fill temporal gaps so the data becomes continuous."""
        pass
|
from dataclasses import asdict
from sanic.response import json
from extra_hours.account.commands import CreateUserCommand, AuthenticateUserCommand, ChangeUserPasswordCommand
def init_account(**kwargs):
    """Register the account HTTP endpoints on the given Sanic app.

    Expected kwargs: ``app`` (the Sanic application), ``uow`` (a unit-of-work
    context-manager factory), and the use-case factories ``get_create_user``,
    ``get_authenticate_user`` and ``get_change_user_password``.
    """
    app = kwargs.get('app')
    unit_of_work = kwargs.get('uow')
    create_user_factory = kwargs.get('get_create_user')
    authenticate_user_factory = kwargs.get('get_authenticate_user')
    change_user_password_factory = kwargs.get('get_change_user_password')

    @app.post('/api/v1/account')
    def create_account(request):
        # Run inside a unit of work so repository changes commit atomically.
        with unit_of_work():
            body = request.json
            command = CreateUserCommand(email=body.get('email', ''),
                                        password=body.get('password', ''))
            use_case = create_user_factory()
            use_case.execute(command)
            if use_case.is_valid:
                return json(body={'data': asdict(command), 'errors': []})
            errors = [notification.message for notification in use_case.notifications]
            return json(body={'data': None, 'errors': errors}, status=400)

    @app.post('/api/v1/account/authenticate')
    def authenticate_user(request):
        body = request.json
        command = AuthenticateUserCommand(email=body.get('email', ''),
                                          password=body.get('password', ''))
        use_case = authenticate_user_factory()
        token = use_case.execute(command)
        if use_case.is_valid:
            return json(body={'data': token, 'errors': []})
        errors = [notification.message for notification in use_case.notifications]
        return json(body={'data': None, 'errors': errors}, status=400)

    @app.post('/api/v1/account/change-password')
    def change_user_password(request):
        with unit_of_work():
            body = request.json
            # NOTE: unlike the handlers above, these lookups have no '' fallback;
            # this mirrors the original behaviour (missing keys pass None through).
            command = ChangeUserPasswordCommand(email=body.get('email'),
                                                old_password=body.get('old_password'),
                                                new_password=body.get('new_password'))
            use_case = change_user_password_factory()
            use_case.execute(command)
            if use_case.is_valid:
                return json(body={'data': asdict(command), 'errors': []})
            errors = [notification.message for notification in use_case.notifications]
            return json(body={'data': None, 'errors': errors}, status=400)
|
import asyncio
from datetime import datetime
from io import BytesIO
from typing import Tuple
import PIL
from aiohttp import ClientSession
from ..errors import (InvalidContentType, InvalidToken, JsonDecodeException,
RequestError)
from ..ratelimit import RateLimiter
from ..request import Request
from .upload import Upload
class ImADev:
    """Async client for the upload API reached through ``Request``/``RateLimiter``."""

    # File extensions the API refuses to host.
    DISABLED = ('php', 'html', 'js', 'css', 'ts')

    def __init__(self, token: str = None):
        """
        :param token: API token sent with every request; required.
        :raises ValueError: if ``token`` is missing or not a string.
        """
        # Bug fix: validate the token *before* opening the ClientSession, so an
        # invalid token no longer leaks an unclosed session.
        if token is None:
            raise ValueError('No token specified')
        if not isinstance(token, str):
            raise ValueError('token argument not str, got {}'.format(token.__class__.__name__))
        self.token = token
        # Wrap the HTTP session in a rate limiter so API limits are respected.
        self._http = RateLimiter(ClientSession())

    async def upload(self, data: Tuple[str, bytes] = None):
        """Upload a file.

        :param data: ``(extension, file_bytes)`` tuple.
        :raises ValueError: on missing/mistyped arguments.
        :raises InvalidContentType: for blacklisted extensions.
        :raises JsonDecodeException: if the API response is not JSON.
        :raises RequestError: if the API reports an error.
        :returns: an :class:`Upload` describing the stored file.
        """
        # Bug fix: check for None *before* subscripting — the old code did
        # ``file = data[1]`` first, so ``upload(None)`` raised TypeError instead
        # of the intended ValueError.
        if data is None:
            raise ValueError('file argument not specified')
        file = data[1]
        if not isinstance(file, bytes):
            raise ValueError('file argument not bytes, got {}'.format(file.__class__.__name__))
        if data[0] in self.DISABLED:
            raise InvalidContentType('{} is a API blacklisted file extension.'.format(data[0]))
        payload = {
            'token': self.token,
            'endpoint': 'upload'
        }
        files = {"image": data}
        req = Request(self._http, 'upload.php', 'POST', data=payload, files=files)
        try:
            json = await req.json()
        except Exception:
            raise JsonDecodeException('Failed to decode json from API. Raw content:\n\n' + str(await req.read()))
        try:
            # The API escapes '/' in URLs; strip the backslashes.
            return Upload(json['filename'], json['username'], json['url'].replace("\\", ""), datetime.now())
        except Exception:
            if 'error' in json:
                raise RequestError(json['error'])
            raise RequestError('fatal error uploading.')

    async def get_upload(self, filename: str = None):
        """Fetch metadata about a previously uploaded file.

        :param filename: name returned by :meth:`upload`.
        :raises ValueError: on missing/mistyped arguments.
        :raises JsonDecodeException: if the API response is not JSON.
        :raises RequestError: if the API reports an error.
        :returns: an :class:`Upload` with the stored upload time.
        """
        if filename is None:
            raise ValueError('filename argument not specified')
        if not isinstance(filename, str):
            raise ValueError('filename argument not str, got {}'.format(filename.__class__.__name__))
        payload = {
            'token': self.token,
            'endpoint': 'get_upload',
            'filename': filename
        }
        req = Request(self._http, 'upload.php', 'POST', data=payload)
        try:
            json = await req.json()
        except Exception:
            raise JsonDecodeException('Failed to decode json from API. Raw content:\n\n' + await req.read())
        try:
            return Upload(json['filename'], json['username'], json['url'].replace("\\", ""), datetime.strptime(json['uploaded_at'], '%B %d %Y %H:%M:%S'))
        except Exception:
            if 'error' in json:
                raise RequestError(json['error'])
            raise RequestError('fatal error getting upload info.')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# QTPyLib: Quantitative Trading Python Library
# https://github.com/ranaroussi/qtpylib
#
# Copyright 2016-2018 Ran Aroussi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from qtpylib.algo import Algo
from qtpylib import futures
class TestStrategy(Algo):
    """
    Example: This Strategy buys/sells single contract of the
    S&P E-mini Futures (ES) every 10th tick with a +/- 0.5
    tick target/stop using LIMIT order.
    If still in position for next 5 ticks, an exit order is issued.
    """
    # Event counter shared by on_tick and on_bar.
    # NOTE(review): both handlers increment the same counter — confirm intended.
    count = 0
    # ---------------------------------------
    def on_start(self):
        """ initialize tick counter """
        self.count = 0
    # ---------------------------------------
    def on_quote(self, instrument):
        """No-op quote handler (quote data would come from get_quote())."""
        # quote = instrument.get_quote()
        # ^^ quote data available via get_quote()
        pass
    # ---------------------------------------
    def on_orderbook(self, instrument):
        """No-op order book handler."""
        pass
    # ---------------------------------------
    def on_fill(self, instrument, order):
        """No-op fill handler."""
        pass
    # ---------------------------------------
    def on_tick(self, instrument):
        """Every 10th tick: exit an open position, or send a random bracket order."""
        # increase counter and do nothing unless this is the 10th tick
        self.count += 1
        if self.count % 10 != 0:
            return
        # continue ...
        # get last tick dict
        tick = instrument.get_ticks(lookback=1, as_dict=True)
        if instrument.positions['position']:
            print(instrument.symbol, "still in position. Exiting...")
            instrument.exit()
        else:
            if instrument.pending_orders:
                print(instrument.symbol, "has a pending order. Wait...")
            else:
                # random order direction
                direction = random.choice(["BUY", "SELL"])
                print(instrument.symbol,
                      'not in position. Sending a bracket ', direction, 'order...')
                # +/- 0.5 target/stop around the last traded price
                if direction == "BUY":
                    target = tick['last'] + 0.5
                    stoploss = tick['last'] - 0.5
                else:
                    target = tick['last'] - 0.5
                    stoploss = tick['last'] + 0.5
                instrument.order(direction, 1,
                                 limit_price=tick['last'],
                                 target=target,
                                 initial_stop=stoploss,
                                 trail_stop_at=0,
                                 trail_stop_by=0,
                                 expiry=5
                                 )
                # record action
                self.record(take_action=1)
    # ---------------------------------------
    def on_bar(self, instrument):
        """Every 5th bar: same logic as on_tick, keyed off the bar's close price."""
        # continue ...
        # # nothing exiting here...
        # bar = instrument.get_bars(lookback=1, as_dict=True)
        # print("BAR:", bar)
        # increase counter and do nothing unless this is the 5th bar
        self.count += 1
        if self.count % 5 != 0:
            return
        # continue ...
        # get last bar dict
        bar = instrument.get_bars(lookback=1, as_dict=True)
        print("BAR:", bar)
        if instrument.positions['position']:
            print(instrument.symbol, "still in position. Exiting...")
            instrument.exit()
        else:
            if instrument.pending_orders:
                print(instrument.symbol, "has a pending order. Wait...")
            else:
                # random order direction
                direction = random.choice(["BUY", "SELL"])
                print(instrument.symbol,
                      'not in position. Sending a bracket ', direction, 'order...')
                # +/- 0.5 target/stop around the close
                if direction == "BUY":
                    target = bar['close'] + 0.5
                    stoploss = bar['close'] - 0.5
                else:
                    target = bar['close'] - 0.5
                    stoploss = bar['close'] + 0.5
                instrument.order(direction, 1,
                                 limit_price=bar['close'],
                                 target=target,
                                 initial_stop=stoploss,
                                 trail_stop_at=0,
                                 trail_stop_by=0,
                                 expiry=5
                                 )
                # record action
                self.record(take_action=1)
# ===========================================
# Script entry point: look up the active ES contract and run a backtest.
if __name__ == "__main__":
    # get most active ES contract to trade
    # NOTE(review): ACTIVE_MONTH is printed but not used below — the instrument
    # expiry is hard-coded to 202009; confirm which is intended.
    ACTIVE_MONTH = futures.get_active_contract("ES")
    print("Active month for ES is:", ACTIVE_MONTH)
    # --backtest true --start # 2019-12-01 --end 2019-12-02 --data /Users/sponraj/Desktop/my_data_csv --output ./portfolio.csv
    strategy = TestStrategy(
        instruments=[("ES", "FUT", "GLOBEX", "USD", 202009, 0.0, "")],
        resolution="1H",
        tick_window=10,
        bar_window=10,
        ibport=7497,
        backtest = True,
        start = '2019-12-15',
        end= '2019-12-31',
        data='/Users/sponraj/Desktop/my_data_csv/',
        output='./portfolio.csv'
    )
    strategy.run()
|
# $Id$
#
# Copyright (C) 2001-2008 greg Landrum
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""utility functions for clustering
"""
def GetNodeList(cluster):
  """returns an ordered list of all nodes below cluster

  Children are visited largest-first (by leaf count) and every node appears
  after all of its descendants, so the root comes last.

  **Arguments**
    - cluster: the cluster in question
  **Returns**
    - a list of the leaves below this cluster
  """
  if len(cluster) == 1:
    return [cluster]
  siblings = cluster.GetChildren()
  # keep the in-place sort: the child list is deliberately reordered largest-first
  siblings.sort(key=len, reverse=True)
  result = []
  for node in siblings:
    result.extend(GetNodeList(node))
  result.append(cluster)
  return result
def GetNodesDownToCentroids(cluster, above=1):
  """returns an ordered list of all nodes below cluster

  Like ``GetNodeList``, but additionally tags each node with an
  ``_aboveCentroid`` attribute: ``above`` while above a centroid, 0 at a
  centroid (a node with ``_isCentroid``), and -1 below one.
  """
  if hasattr(cluster, '_isCentroid'):
    cluster._aboveCentroid = 0
    above = -1
  else:
    cluster._aboveCentroid = above
  if len(cluster) == 1:
    return [cluster]
  res = []
  children = cluster.GetChildren()
  # Bug fix: the old Python-2 cmp-style sort
  # (``children.sort(lambda x, y: cmp(len(y), len(x)))``) raises on Python 3;
  # sort largest-first with a key instead, matching GetNodeList above.
  children.sort(key=len, reverse=True)
  for child in children:
    res = res + GetNodesDownToCentroids(child, above)
  res = res + [cluster]
  return res
def FindClusterCentroidFromDists(cluster, dists):
  """ find the point in a cluster which has the smallest summed
  Euclidean distance to all others

  **Arguments**
    - cluster: the cluster to work with
    - dists: the distance matrix to use for the points (condensed,
      lower-triangular order indexed by ``col*(col-1)//2 + row``)
  **Returns**
    - the index of the centroid point

  Side effects: every point gets ``_distToCenter``/``_clustCenter``
  attributes, and the cluster itself is marked as its own center.
  """
  children = cluster.GetPoints()
  pts = [x.GetData() for x in children]
  best = 1e24
  bestIdx = -1
  for pt in pts:
    dAccum = 0.0
    # loop over others and add'em up
    for other in pts:
      if other != pt:
        if other > pt:
          row, col = pt, other
        else:
          row, col = other, pt
        # Bug fix: use floor division for the condensed-matrix index;
        # ``/`` yields a float on Python 3, which is not a valid index.
        dAccum += dists[col * (col - 1) // 2 + row]
        if dAccum >= best:
          # minor efficiency hack: this point can no longer win
          break
    if dAccum < best:
      best = dAccum
      bestIdx = pt
  # annotate every point with its distance to the chosen centroid
  for i in range(len(pts)):
    pt = pts[i]
    if pt != bestIdx:
      if pt > bestIdx:
        row, col = bestIdx, pt
      else:
        row, col = pt, bestIdx
      children[i]._distToCenter = dists[col * (col - 1) // 2 + row]
    else:
      children[i]._distToCenter = 0.0
    children[i]._clustCenter = bestIdx
  cluster._clustCenter = bestIdx
  cluster._distToCenter = 0.0
  return bestIdx
def _BreadthFirstSplit(cluster, n):
""" *Internal Use Only*
"""
if len(cluster) < n:
raise ValueError('Cannot split cluster of length %d into %d pieces' % (len(cluster), n))
if len(cluster) == n:
return cluster.GetPoints()
clusters = [cluster]
nxtIdx = 0
for i in range(n - 1):
while nxtIdx < len(clusters) and len(clusters[nxtIdx]) == 1:
nxtIdx += 1
assert nxtIdx < len(clusters)
children = clusters[nxtIdx].GetChildren()
children.sort(key=lambda x: x.GetMetric(), reverse=True)
for child in children:
clusters.append(child)
del clusters[nxtIdx]
return clusters
def _HeightFirstSplit(cluster, n):
""" *Internal Use Only*
"""
if len(cluster) < n:
raise ValueError('Cannot split cluster of length %d into %d pieces' % (len(cluster), n))
if len(cluster) == n:
return cluster.GetPoints()
clusters = [cluster]
for i in range(n - 1):
nxtIdx = 0
while nxtIdx < len(clusters) and len(clusters[nxtIdx]) == 1:
nxtIdx += 1
assert nxtIdx < len(clusters)
children = clusters[nxtIdx].GetChildren()
for child in children:
clusters.append(child)
del clusters[nxtIdx]
clusters.sort(key=lambda x: x.GetMetric(), reverse=True)
return clusters
def SplitIntoNClusters(cluster, n, breadthFirst=1):
  """ splits a cluster tree into a set of branches

  **Arguments**
    - cluster: the root of the cluster tree
    - n: the number of clusters to include in the split
    - breadthFirst: toggles breadth first (vs depth first) cleavage
      of the cluster tree.
  **Returns**
    - a list of sub clusters
  """
  splitter = _BreadthFirstSplit if breadthFirst else _HeightFirstSplit
  return splitter(cluster, n)
|
from django.db import models
from djcelery.models import TaskMeta
from jsonfield import JSONField
from asset.models import AssetInfo
# Names exported by ``from <this module> import *``.
__all__ = [
    'Tools',
    'ToolsResults',
    'Variable'
]
# Run types executed as raw shell commands.
# NOTE(review): not referenced in this module — presumably consumed by the
# task-execution code; confirm before removing.
cmd_list = [
    'shell',
]
class Tools(models.Model):
    """An executable ops tool: a named script plus its run type and description."""
    # Supported script flavours: (stored value, display label).
    TOOL_RUN_TYPE = (
        ('shell', 'shell'),
        ('python', 'python'),
        ('yml', 'yml'),
    )
    # verbose_name strings are Chinese UI labels: tool name, script body,
    # script type, description, created at, updated at.
    name = models.CharField(max_length=255, verbose_name='工具名称', unique=True)
    tool_script = models.TextField(verbose_name='脚本', null=True, blank=True)
    tool_run_type = models.CharField(choices=TOOL_RUN_TYPE, verbose_name='脚本类型', max_length=24)
    comment = models.TextField(verbose_name='工具说明', null=True, blank=True)
    ctime = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
    utime = models.DateTimeField(auto_now=True, verbose_name='更新时间')
    def __str__(self):
        return self.name
    class Meta:
        db_table = "Tools"
        verbose_name = "工具"
        verbose_name_plural = verbose_name
class ToolsResults(models.Model):
    """A tool execution, identified by its Celery task id."""
    # verbose_name strings are Chinese UI labels: task id, creator, created at.
    task_id = models.UUIDField(max_length=255, verbose_name='任务ID', unique=True)
    add_user = models.CharField(max_length=255, verbose_name='创建者', null=True, blank=True)
    ctime = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
    @property
    def status(self):
        # Looks the task state up in djcelery's result table on every access.
        # NOTE(review): raises TaskMeta.DoesNotExist when no result row exists
        # yet — confirm callers handle that.
        status = TaskMeta.objects.get(task_id=self.task_id).status
        return status
    class Meta:
        db_table = "ToolsResults"
        verbose_name = "任务"
        verbose_name_plural = verbose_name
class Variable(models.Model):
    """A named group of variables (JSON), optionally bound to a set of assets."""
    # verbose_name strings are Chinese UI labels: group name, description,
    # variables, related assets, created at, updated at.
    name = models.CharField(max_length=200, verbose_name='变量组名字')
    desc = models.TextField(null=True, blank=True, verbose_name='描述')
    # Bug fix: ``default={}`` shares a single mutable dict across all instances
    # (Django explicitly warns against mutable field defaults); a callable
    # default creates a fresh dict per instance.
    vars = JSONField(null=True, blank=True, default=dict, verbose_name='变量')
    # NOTE(review): related_name='asset' means AssetInfo.asset resolves to
    # Variable objects — confirm the name is intended.
    assets = models.ManyToManyField(AssetInfo, verbose_name='关联资产', related_name='asset', blank=True)
    ctime = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
    utime = models.DateTimeField(auto_now=True, verbose_name='更新时间')
    def __str__(self):
        return self.name
    class Meta:
        db_table = "Variable"
        verbose_name = "变量组"
        verbose_name_plural = verbose_name
|
import pandas
import numpy
import sys
import unittest
import os
import copy
import warnings
import tempfile
from isatools import isatab
sys.path.append("..")
import nPYc
from nPYc.enumerations import AssayRole, SampleType
from nPYc.utilities._nmr import qcCheckBaseline
from generateTestDataset import generateTestDataset
class test_nmrdataset_synthetic(unittest.TestCase):
def setUp(self):
self.noSamp = numpy.random.randint(50, high=100, size=None)
self.noFeat = numpy.random.randint(200, high=400, size=None)
self.dataset = generateTestDataset(self.noSamp, self.noFeat, dtype='NMRDataset',
variableType=nPYc.enumerations.VariableType.Spectral,
sop='GenericNMRurine')
def test_getsamplemetadatafromfilename(self):
"""
Test we are parsing NPC MS filenames correctly (PCSOP.081).
"""
# Create an empty object with simple filenames
dataset = nPYc.NMRDataset('', fileType='empty')
dataset.sampleMetadata['Sample File Name'] = ['Test1_serum_Rack1_SLT_090114/101',
'Test_serum_Rack10_SLR_090114/10',
'Test2_serum_Rack100_DLT_090114/102',
'Test2_urine_Rack103_MR_090114/20',
'Test2_serum_Rack010_JTP_090114/80',
'Test1_water_Rack10_TMP_090114/90']
dataset._getSampleMetadataFromFilename(dataset.Attributes['filenameSpec'])
rack = pandas.Series([1, 10, 100, 103, 10, 10],
name='Rack',
dtype=int)
pandas.testing.assert_series_equal(dataset.sampleMetadata['Rack'], rack)
study = pandas.Series(['Test1', 'Test', 'Test2', 'Test2', 'Test2', 'Test1'],
name='Study',
dtype=str)
pandas.testing.assert_series_equal(dataset.sampleMetadata['Study'], study)
def test_nmrdataset_raises(self):
self.assertRaises(NotImplementedError, nPYc.NMRDataset, '', fileType='Unknown import type')
self.assertRaises(TypeError, nPYc.NMRDataset, '', fileType='Bruker', bounds='not a list')
self.assertRaises(TypeError, nPYc.NMRDataset, '', fileType='Bruker', calibrateTo='not a number')
self.assertRaises(TypeError, nPYc.NMRDataset, '', fileType='Bruker', variableSize=0.1)
def test_load_npc_lims_masking_reruns(self):
limspath = os.path.join('..', '..', 'npc-standard-project', 'Derived_Worklists', 'UnitTest1_NMR_urine_PCSOP.011.csv')
dataset = nPYc.NMRDataset('', 'empty')
dataset.sampleMetadata = pandas.DataFrame([], columns=['Sample File Name'])
dataset.sampleMetadata['Sample File Name'] = ['UnitTest1_Urine_Rack1_SLL_270814/10', 'UnitTest1_Urine_Rack1_SLL_270814/12', 'UnitTest1_Urine_Rack1_SLL_270814/20', 'UnitTest1_Urine_Rack1_SLL_270814/30', 'UnitTest1_Urine_Rack1_SLL_270814/40','UnitTest1_Urine_Rack1_SLL_270814/51', 'UnitTest1_Urine_Rack1_SLL_270814/52', 'UnitTest1_Urine_Rack1_SLL_270814/50', 'UnitTest1_Urine_Rack1_SLL_270814/60', 'UnitTest1_Urine_Rack1_SLL_270814/70', 'UnitTest1_Urine_Rack1_SLL_270814/80', 'UnitTest1_Urine_Rack1_SLL_270814/81', 'UnitTest1_Urine_Rack1_SLL_270814/90']
dataset.intensityData = numpy.zeros((13, 2))
dataset.intensityData[:, 0] = numpy.arange(1, 14, 1)
dataset.initialiseMasks()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# warning
dataset.addSampleInfo(descriptionFormat='NPC LIMS', filePath=limspath)
# check
assert issubclass(w[0].category, UserWarning)
assert "previous acquisitions masked, latest is kept" in str(w[0].message)
with self.subTest(msg='Masking of reruns'):
expectedMask = numpy.array([False, True, True, True, True, False, True, False, True, True, False, True, True], dtype=bool)
numpy.testing.assert_array_equal(dataset.sampleMask, expectedMask)
def test_updateMasks_samples(self):
from nPYc.enumerations import VariableType, DatasetLevel, AssayRole, SampleType
dataset = generateTestDataset(18, 5, dtype='NMRDataset',
variableType=nPYc.enumerations.VariableType.Spectral,
sop='GenericNMRurine')
dataset.Attributes.pop('LWFailThreshold', None)
dataset.Attributes.pop('baselineCheckRegion', None)
dataset.Attributes.pop('solventPeakCheckRegion', None)
dataset.sampleMetadata['AssayRole'] = pandas.Series([AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference],
name='AssayRole',
dtype=object)
dataset.sampleMetadata['SampleType'] = pandas.Series([SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.ExternalReference,
SampleType.MethodReference],
name='SampleType',
dtype=object)
with self.subTest(msg='Default Parameters'):
expectedSampleMask = numpy.array([True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True], dtype=bool)
dataset.initialiseMasks()
dataset.updateMasks(filterFeatures=False)
numpy.testing.assert_array_equal(expectedSampleMask, dataset.sampleMask)
with self.subTest(msg='Export SP and ER'):
expectedSampleMask = numpy.array([False, False, False, False, False, True, True, True, True, True, True, False, False, False, False, False, True, False], dtype=bool)
dataset.initialiseMasks()
dataset.updateMasks(filterFeatures=False,
sampleTypes=[SampleType.StudyPool, SampleType.ExternalReference],
assayRoles=[AssayRole.PrecisionReference])
numpy.testing.assert_array_equal(expectedSampleMask, dataset.sampleMask)
with self.subTest(msg='Export Dilution Samples only'):
expectedSampleMask = numpy.array([True, True, True, True, True, False, False, False, False, False, False, False, False, False, False, False, False, False], dtype=bool)
dataset.initialiseMasks()
dataset.updateMasks(filterFeatures=False,
sampleTypes=[SampleType.StudyPool],
assayRoles=[AssayRole.LinearityReference])
numpy.testing.assert_array_equal(expectedSampleMask, dataset.sampleMask)
def test_updateMasks_features(self):
noSamp = 10
noFeat = numpy.random.randint(1000, high=10000, size=None)
dataset = generateTestDataset(noSamp, noFeat, dtype='NMRDataset',
variableType=nPYc.enumerations.VariableType.Spectral,
sop='GenericNMRurine')
dataset.Attributes.pop('LWFailThreshold', None)
dataset.Attributes.pop('baselineCheckRegion', None)
dataset.Attributes.pop('solventPeakCheckRegion', None)
ppm = numpy.linspace(-10, 10, noFeat)
dataset.featureMetadata = pandas.DataFrame(ppm, columns=['ppm'])
with self.subTest(msg='Single range'):
ranges = (-1.1, 1.2)
dataset.initialiseMasks()
dataset.updateMasks(filterFeatures=True,
filterSamples=False,
exclusionRegions=ranges)
expectedFeatureMask = numpy.logical_or(ppm < ranges[0],
ppm > ranges[1])
numpy.testing.assert_array_equal(expectedFeatureMask, dataset.featureMask)
with self.subTest(msg='Reversed range'):
ranges = (7.1, 1.92)
dataset.initialiseMasks()
dataset.updateMasks(filterFeatures=True,
filterSamples=False,
exclusionRegions=ranges)
expectedFeatureMask = numpy.logical_or(ppm < ranges[1],
ppm > ranges[0])
numpy.testing.assert_array_equal(expectedFeatureMask, dataset.featureMask)
with self.subTest(msg='list of ranges'):
ranges = [(-5,-1), (1,5)]
dataset.initialiseMasks()
dataset.updateMasks(filterFeatures=True,
filterSamples=False,
exclusionRegions=ranges)
expectedFeatureMask1 = numpy.logical_or(ppm < ranges[0][0],
ppm > ranges[0][1])
expectedFeatureMask2 = numpy.logical_or(ppm < ranges[1][0],
ppm > ranges[1][1])
expectedFeatureMask = numpy.logical_and(expectedFeatureMask1,
expectedFeatureMask2)
numpy.testing.assert_array_equal(expectedFeatureMask, dataset.featureMask)
def test_updateMasks_raises(self):
with self.subTest(msg='No Ranges'):
self.dataset.Attributes['exclusionRegions'] = None
self.assertRaises(ValueError, self.dataset.updateMasks, filterFeatures=True, filterSamples=False, exclusionRegions=None)
def test_updateMasks_warns(self):
with self.subTest(msg='Range low == high'):
self.dataset.Attributes['exclusionRegions'] = None
self.assertWarnsRegex(UserWarning, 'Low \(1\.10\) and high \(1\.10\) bounds are identical, skipping region', self.dataset.updateMasks, filterFeatures=True, filterSamples=False, exclusionRegions=(1.1,1.1))
def test_nmrQCchecks(self):
self.dataset.Attributes.pop('LWFailThreshold', None)
self.dataset.Attributes.pop('baselineCheckRegion', None)
self.dataset.Attributes.pop('solventPeakCheckRegion', None)
with self.subTest('Calibration'):
bounds = numpy.std(self.dataset.sampleMetadata['Delta PPM']) * 3
self.dataset.sampleMetadata.loc[0::30, 'Delta PPM'] = bounds * 15
self.dataset._nmrQCChecks()
# Check mask
expected = numpy.zeros_like(self.dataset.sampleMask, dtype=bool)
expected[0::30] = True
numpy.testing.assert_array_equal(expected, self.dataset.sampleMetadata['CalibrationFail'].values)
# Check other tests have not happened
# Commented out assuming the test nmr dataset obtained with generateTestDataset always has these columns
#for skipedCheck in ['LineWidthFail', 'BaselineFail', 'WaterPeakFail']:
# self.assertFalse(skipedCheck in self.dataset.sampleMetadata.columns)
with self.subTest('Line Width'):
self.dataset.Attributes['LWFailThreshold'] = 2
self.dataset.sampleMetadata['Line Width (Hz)'] = 1.5
self.dataset.sampleMetadata.loc[0::5, 'Line Width (Hz)'] = 3
self.dataset._nmrQCChecks()
expected = numpy.zeros_like(self.dataset.sampleMask, dtype=bool)
expected[0::5] = True
numpy.testing.assert_array_equal(expected, self.dataset.sampleMetadata['LineWidthFail'].values)
# Check other tests have not happened
# Commented out assuming the test nmr dataset obtained with generateTestDataset always has these columns
#for skipedCheck in ['BaselineFail', 'WaterPeakFail']:
# self.assertFalse(skipedCheck in self.dataset.sampleMetadata.columns)
with self.subTest('Baseline'):
self.dataset.Attributes['baselineCheckRegion'] = [(-2, -0.5), (9.5, 12.5)]
self.dataset.intensityData[0,:] = 100
self.dataset.intensityData[2,:] = -100
self.dataset._nmrQCChecks()
expected = numpy.zeros_like(self.dataset.sampleMask, dtype=bool)
expected[0] = True
expected[2] = True
numpy.testing.assert_array_equal(expected, self.dataset.sampleMetadata['BaselineFail'].values)
# Check other tests have not happened
# Commented out assuming the test nmr dataset obtained with generateTestDataset always has these columns
#self.assertFalse('WaterPeakFail' in self.dataset.sampleMetadata.columns)
with self.subTest('Solvent Peak'):
self.dataset.Attributes['solventPeakCheckRegion'] = [(-2, -0.5), (9.5, 12.5)]
self.dataset._nmrQCChecks()
expected = numpy.zeros_like(self.dataset.sampleMask, dtype=bool)
expected[0] = True
# expected[2] = True
numpy.testing.assert_array_equal(expected, self.dataset.sampleMetadata['SolventPeakFail'].values)
def test_baselineAreaAndNeg(self):
    """
    Validate the baseline/water-peak QC check: build random spectra plus rows
    that must always fail (strongly negative, extreme-high, and a diagonal ramp)
    and confirm exactly those rows are flagged.
    """
    nFeatures = 20000

    # 86 ordinary random spectra that should pass the baseline check.
    spectra = numpy.random.rand(86, nFeatures) * 1000
    # Three guaranteed failures, appended in order: negative, extreme, ramp.
    negativeRow = numpy.full((1, nFeatures), -10000)
    extremeRow = numpy.full((1, nFeatures), 200000)
    rampRow = numpy.arange(0, nFeatures, 1)[numpy.newaxis]
    # Two extra random rows with more variability than the 86 above.
    extraRows = numpy.random.rand(2, nFeatures) * 10000
    spectra = numpy.vstack((spectra, negativeRow, extremeRow, rampRow, extraRows))

    # Build the ppm axis and select the two baseline regions at either end.
    ppm = numpy.linspace(-1, 10, nFeatures)
    highRegion = numpy.where(ppm >= 9.5)[0]
    lowRegion = numpy.where(ppm <= -0.5)[0]

    failedHigh = qcCheckBaseline(spectra[:, highRegion], 0.05)
    failedLow = qcCheckBaseline(spectra[:, lowRegion], 0.05)
    observedFailures = failedHigh | failedLow

    # Rows 86, 87 and 88 (the three constructed rows) must fail; nothing else.
    expectedFailures = numpy.zeros(91, dtype=bool)
    expectedFailures[86:89] = True

    numpy.testing.assert_array_equal(expectedFailures, observedFailures)
class test_nmrdataset_bruker(unittest.TestCase):
    """
    Tests for NMRDataset built from raw Bruker data.

    NOTE(review): these tests read from '../../npc-standard-project/...' on
    disk — they assume that checkout exists next to this repository.
    """

    def setUp(self):
        """
        setup the pulseprogram and path for purpose of testing NMR bruker data functions
        """
        self.pulseProgram = 'noesygppr1d'
        self.path = os.path.join('..', '..', 'npc-standard-project', 'unitTest_Data', 'nmr')

    def test_addSampleInfo_npclims(self):
        # For each raw dataset: load it, sort deterministically by file name,
        # merge the NPC LIMS worklist, then check the merged columns.
        with self.subTest(msg='Urine dataset (UnitTest1).'):
            dataPath = os.path.join('..', '..', 'npc-standard-project', 'Raw_Data', 'nmr', 'UnitTest1')
            limsFilePath = os.path.join('..', '..', 'npc-standard-project', 'Derived_Worklists', 'UnitTest1_NMR_urine_PCSOP.011.csv')
            with warnings.catch_warnings():
                # Suppress import/acquisition warnings from the raw data load.
                warnings.simplefilter("ignore")
                dataset = nPYc.NMRDataset(dataPath, pulseProgram='noesygppr1d', sop='GenericNMRurine')
            # Sort rows (and intensity data to match) so the expected lists
            # below line up with the dataset ordering.
            dataset.sampleMetadata.sort_values('Sample File Name', inplace=True)
            sortIndex = dataset.sampleMetadata.index.values
            dataset.intensityData = dataset.intensityData[sortIndex, :]
            dataset.sampleMetadata = dataset.sampleMetadata.reset_index(drop=True)
            expected = copy.deepcopy(dataset.sampleMetadata)
            dataset.addSampleInfo(descriptionFormat='NPC LIMS', filePath=limsFilePath)
            testSeries = ['Sample ID', 'Status', 'AssayRole', 'SampleType']
            expected['Sample ID'] = ['UT1_S2_u1', 'UT1_S3_u1', 'UT1_S4_u1', 'UT1_S4_u2', 'UT1_S4_u3',
                                     'UT1_S4_u4', 'External Reference Sample', 'Study Pool Sample']
            expected['Status'] = ['Sample', 'Sample', 'Sample', 'Sample', 'Sample', 'Sample', 'Long Term Reference', 'Study Reference']
            expected['AssayRole'] = [AssayRole.Assay, AssayRole.Assay, AssayRole.Assay, AssayRole.Assay,
                                     AssayRole.Assay, AssayRole.Assay, AssayRole.PrecisionReference, AssayRole.PrecisionReference]
            expected['SampleType'] = [SampleType.StudySample, SampleType.StudySample, SampleType.StudySample, SampleType.StudySample,
                                      SampleType.StudySample, SampleType.StudySample, SampleType.ExternalReference, SampleType.StudyPool]
            for series in testSeries:
                with self.subTest(msg='Testing %s' % series):
                    pandas.testing.assert_series_equal(dataset.sampleMetadata[series], expected[series])
        with self.subTest(msg='Serum dataset (UnitTest3).'):
            dataPath = os.path.join('..', '..', 'npc-standard-project', 'Raw_Data', 'nmr', 'UnitTest3')
            limsFilePath = os.path.join('..', '..', 'npc-standard-project', 'Derived_Worklists', 'UnitTest3_NMR_serum_PCSOP.012.csv')
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                dataset = nPYc.NMRDataset(dataPath, pulseProgram='cpmgpr1d', sop='GenericNMRurine') # Use blood sop to avoid calibration of empty spectra
            dataset.sampleMetadata.sort_values('Sample File Name', inplace=True)
            sortIndex = dataset.sampleMetadata.index.values
            dataset.intensityData = dataset.intensityData[sortIndex, :]
            dataset.sampleMetadata = dataset.sampleMetadata.reset_index(drop=True)
            expected = copy.deepcopy(dataset.sampleMetadata)
            dataset.addSampleInfo(descriptionFormat='NPC LIMS', filePath=limsFilePath)
            testSeries = ['Sample ID', 'Status', 'AssayRole', 'SampleType']
            expected['Sample ID'] = ['UT3_S7', 'UT3_S8', 'UT3_S6', 'UT3_S5', 'UT3_S4', 'UT3_S3', 'UT3_S2', 'External Reference Sample', 'Study Pool Sample', 'UT3_S1']
            expected['Status'] = ['Sample', 'Sample', 'Sample', 'Sample', 'Sample', 'Sample', 'Sample', 'Long Term Reference', 'Study Reference', 'nan']
            expected['AssayRole'] = [AssayRole.Assay, AssayRole.Assay, AssayRole.Assay, AssayRole.Assay, AssayRole.Assay, AssayRole.Assay,
                                     AssayRole.Assay, AssayRole.PrecisionReference, AssayRole.PrecisionReference, AssayRole.Assay]
            expected['SampleType'] = [SampleType.StudySample, SampleType.StudySample, SampleType.StudySample, SampleType.StudySample, SampleType.StudySample,
                                      SampleType.StudySample, SampleType.StudySample, SampleType.ExternalReference, SampleType.StudyPool, SampleType.StudySample]
            for series in testSeries:
                with self.subTest(msg='Testing %s' % series):
                    pandas.testing.assert_series_equal(dataset.sampleMetadata[series], expected[series])
class test_nmrdataset_ISATAB(unittest.TestCase):
    """Export an NMRDataset as ISA-Tab and validate the result with isatab."""

    def test_exportISATAB(self):
        # Start from an empty dataset and fill sampleMetadata by hand so the
        # exported ISA-Tab content is fully deterministic.
        nmrData = nPYc.NMRDataset('', fileType='empty')
        raw_data = {
            'Acquired Time': ['2016-08-09 01:36:23', '2016-08-09 01:56:23', '2016-08-09 02:16:23', '2016-08-09 02:36:23', '2016-08-09 02:56:23'],
            'AssayRole': ['AssayRole.LinearityReference', 'AssayRole.LinearityReference',
                          'AssayRole.LinearityReference', 'AssayRole.Assay', 'AssayRole.Assay'],
            #'SampleType': ['SampleType.StudyPool', 'SampleType.StudyPool', 'SampleType.StudyPool','SampleType.StudySample', 'SampleType.StudySample'],
            'Status': ['SampleType.StudyPool', 'SampleType.StudyPool', 'SampleType.StudyPool','SampleType.StudySample', 'SampleType.StudySample'],
            'Subject ID': ['', '', '', 'SCANS-120', 'SCANS-130'],
            'Sampling ID': ['', '', '', 'T0-7-S', 'T0-9-S'],
            'Sample File Name': ['sfn1', 'sfn2', 'sfn3', 'sfn4', 'sfn5'],
            'Study': ['TestStudy', 'TestStudy', 'TestStudy', 'TestStudy', 'TestStudy'],
            'Gender': ['', '', '', 'Female', 'Male'],
            'Age': ['', '', '', '55', '66'],
            'Sampling Date': ['', '', '', '27/02/2006', '28/02/2006'],
            'Sample batch': ['', '', '', 'SB 1', 'SB 2'],
            'Batch': ['1', '2', '3', '4', '5'],
            'Run Order': ['0', '1', '2', '3', '4'],
            'Instrument': ['QTOF 2', 'QTOF 2', 'QTOF 2', 'QTOF 2', 'QTOF 2'],
            'Assay data name': ['', '', '', 'SS_LNEG_ToF02_S1W4', 'SS_LNEG_ToF02_S1W5']
        }
        nmrData.sampleMetadata = pandas.DataFrame(raw_data,
                                                  columns=['Acquired Time', 'AssayRole', 'Status', 'Subject ID',
                                                           'Sampling ID', 'Study', 'Gender', 'Age', 'Sampling Date',
                                                           'Sample batch', 'Batch',
                                                           'Run Order', 'Instrument', 'Assay data name','Sample File Name'])
        with tempfile.TemporaryDirectory() as tmpdirname:
            # Investigation/study/assay metadata required by the ISA-Tab writer.
            details = {
                'investigation_identifier' : "i1",
                'investigation_title' : "Give it a title",
                'investigation_description' : "Add a description",
                'investigation_submission_date' : "2016-11-03", #use today if not specified
                'investigation_public_release_date' : "2016-11-03",
                'first_name' : "Noureddin",
                'last_name' : "Sadawi",
                'affiliation' : "University",
                'study_filename' : "my_nmr_study",
                'study_material_type' : "Serum",
                'study_identifier' : "s1",
                'study_title' : "Give the study a title",
                'study_description' : "Add study description",
                'study_submission_date' : "2016-11-03",
                'study_public_release_date' : "2016-11-03",
                'assay_filename' : "my_nmr_assay"
            }
            nmrData.initialiseMasks()
            nmrData.exportDataset(destinationPath=tmpdirname, isaDetailsDict=details, saveFormat='ISATAB')
            # Run the official isatab validator on the exported investigation
            # file and require a clean report.
            investigatio_file = os.path.join(tmpdirname,'i_investigation.txt')
            numerrors = 0
            with open(investigatio_file) as fp:
                report = isatab.validate(fp)
                numerrors = len(report['errors'])
            #self.assertTrue(os.path.exists(a))
            self.assertEqual(numerrors, 0, msg="ISATAB Validator found {} errors in the ISA-Tab archive".format(numerrors))
class test_nmrdataset_initialiseFromCSV(unittest.TestCase):
    """Round-trip test: export a synthetic dataset to CSV and rebuild it."""

    def test_init(self):
        # Random (but bounded) dataset dimensions to vary the round trip.
        noSamp = numpy.random.randint(5, high=10, size=None)
        noFeat = numpy.random.randint(500, high=1000, size=None)
        dataset = generateTestDataset(noSamp, noFeat, dtype='NMRDataset', sop='GenericNMRurine')
        dataset.name = 'Testing'
        with tempfile.TemporaryDirectory() as tmpdirname:
            dataset.exportDataset(destinationPath=tmpdirname, saveFormat='CSV', withExclusions=False)
            # The CSV loader locates the sibling files from the sampleMetadata path.
            pathName = os.path.join(tmpdirname, 'Testing_sampleMetadata.csv')
            rebuitData = nPYc.NMRDataset(pathName, fileType='CSV Export')
            # Intensity data, key sample/feature metadata columns and the name
            # must all survive the export/import cycle.
            numpy.testing.assert_array_equal(rebuitData.intensityData, dataset.intensityData)
            for column in ['Sample File Name', 'SampleType', 'AssayRole', 'Acquired Time', 'Run Order']:
                pandas.testing.assert_series_equal(rebuitData.sampleMetadata[column], dataset.sampleMetadata[column], check_dtype=False)
            for column in ['ppm']:
                pandas.testing.assert_series_equal(rebuitData.featureMetadata[column], dataset.featureMetadata[column], check_dtype=False)
            self.assertEqual(rebuitData.name, dataset.name)
# Allow running this test module directly (python <file>) as well as via a runner.
if __name__ == '__main__':
    unittest.main()
|
from django.contrib import admin
from bug.models import Bug
class BugAdmin(admin.ModelAdmin):
    """Admin options for the Bug model: columns shown in the change list."""

    # Field names must match the Bug model attributes exactly. The original
    # entries contained stray leading/trailing spaces (e.g. 'bugname ',
    # ' bugstatus'), which Django's system check rejects (admin.E108:
    # unknown field referenced by list_display).
    list_display = ['bugname', 'bugdetail', 'bugstatus', 'buglevel',
                    'bugcreater', 'bugassign', 'create_time', 'id']


# Register the Bug model with the Django admin site so it appears in the
# backend. Pass BugAdmin explicitly: the original call registered Bug with
# the default ModelAdmin, leaving the class above unused.
admin.site.register(Bug, BugAdmin)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by Em on 2018/1/9
import time
import jieba
import sys
import tb_login
import threading
from api import *
from tb_api import *
from tb_api import get_api_urls
import sys
import getopt
# def main():
# crawler_threads = []
# cookie = tb_login.get_cookies(False)
# for index, api_url in enumerate(get_api_urls()):
# t_name = 'crawler_thread_' + str(index)
# print u'开启线程 ===> ' + t_name
# t = threading.Thread(name=t_name, target=crawler_product, args=(cookie, api_url, ))
# crawler_threads.append(t)
# t.start()
# for t in crawler_threads:
# t.join()
#
# def main():
# cookie = tb_login.get_cookies(False)
# for index, api_url in enumerate(get_api_urls()):
# # t_name = 'crawler_thread_' + str(index)
# print u'开启抓取 ===> ' + str(index)
# crawler_product(cookie, api_url)
# # t = threading.Thread(name=t_name, target=crawler_product, args=(cookie, api_url, ))
def main():
    # Entry point: log in once (reusing saved cookies), then poll the backend
    # forever for crawl jobs and process each one as it arrives.
    cookie = tb_login.get_cookies(False)
    while True:
        try:
            # Ask the backend for the next crawl target; failures (including
            # "no work queued") are handled below by sleeping and retrying.
            c_address = get_crawler_address()
            print u'开始抓取 =================== Start '
            crawler_product(cookie, c_address['data'])
        except Exception as e:
            # NOTE(review): this broad catch treats *any* failure as "nothing
            # to crawl" and silently retries after 60s — real errors are hidden.
            print u'没有可抓取项,休眠60s ...'
            time.sleep(60)
def crawler_product(cookie, dit):
    # Walk result pages from start_page to end_page (a value of 0 means "use
    # the default": start at page 1, stop bound 1000). Stops early when
    # crawler_product_page reports that the API's last page was reached.
    for i in range(1 if dit['start_page']==0 else dit['start_page'], 1000 if dit['end_page']==0 else dit['end_page']):
        end = crawler_product_page(dit, i, cookie)
        if end:
            print u'======================== 结束 ========================'
            break
def crawler_product_page(dit, page, cookies):
    # Fetch one page of product results, create/update the store and product
    # records (plus share links and coupon links) through the backend API,
    # and return True when this was the last available page.
    print u'============================= 开始抓取第 ' + str(page) + u'页 ============================='
    print u'url ==> ' + get_product_url(dit['product_url'], page)
    print '\n'
    r = requests.get(get_product_url(dit['product_url'], page), cookies=cookies)
    info = r.json()['data']
    for p in info['pageList']:
        # Tokenise the title (search-engine mode) to build the tag list.
        seg_list = jieba.cut_for_search(p['title'])
        tags = ','.join(seg_list)
        # Store (shop) record — created first so the product can reference it.
        mys = {}
        mys['nick'] = p['nick']
        mys['name'] = p['shopTitle']
        mys['seller_id'] = p['sellerId']
        mys['tsid'] = p['sellerId']
        store = create_store(mys)
        # Product record mapped from the API item fields.
        myp = {}
        myp['tpid'] = p['auctionId']
        myp['cid'] = dit['category']
        myp['sid'] = store['data']['id']
        myp['name'] = p['title']
        myp['tags'] = tags
        myp['cover'] = p['pictUrl']
        myp['price'] = p['zkPrice']
        myp['source_link'] = p['auctionUrl']
        myp['coupon'] = 1 if p['couponTotalCount'] > 0 else 0
        myp['coupon_info'] = p['couponInfo']
        myp['coupon_start_fee'] = p['couponStartFee']
        myp['coupon_amount'] = p['couponAmount']
        myp['biz30day'] = p['biz30day']
        myp['commission_rate'] = p['eventRate']
        myp['tk_comm_fee'] = p['tkCommFee']
        myp['tk_rate'] = p['tkRate']
        myp['description'] = p['title']
        myp['coupon_effective_start_at'] = p['couponEffectiveStartTime']
        myp['coupon_effective_end_at'] = p['couponEffectiveEndTime']
        myp['status'] = 1
        # Second request: fetch share links / tao-tokens for this item.
        url = get_coupon_url(dit['coupon_url'], p['auctionId'])
        r = requests.get(url, cookies=cookies)
        data = r.json()['data']
        try:
            myp['tpwd'] = data['taoToken']
            myp['link_short'] = data['shortLinkUrl']
            myp['link_long'] = data['clickUrl']
            myp['qrcode'] = data['qrCodeUrl']
        except Exception as e:
            # Share links are optional; missing keys are silently skipped.
            pass
        try:
            if myp['coupon']:
                myp['ctpwd'] = data['couponLinkTaoToken']
                myp['coupon_link_short'] = data['couponShortLinkUrl']
                myp['coupon_link_long'] = data['couponLink']
        except Exception as e:
            # No coupon links came back for this item.
            print u'没有优惠券'
        create_product(myp)
        print '\n=========================== Item End ================================== \n'
        time.sleep(2)  # throttle between items
    if info['head']['pageNo'] == info['paginator']['pages']:
        # Last page reached — tell the caller to stop paging.
        return True
    else:
        time.sleep(60)  # throttle between pages
        return False
if __name__ == '__main__':
    # Python 2 only: reload(sys) re-exposes setdefaultencoding, then UTF-8 is
    # forced so the Chinese console output does not raise UnicodeEncodeError.
    reload(sys)
    sys.setdefaultencoding('utf-8')
    # Command-line options
    try:
        opts, args = getopt.getopt(sys.argv[1:], "dhv", ['debug', 'help', 'version'])
        for op, value in opts:
            if op in ('-d', '--debug'):
                set_debug(True)
            elif op in ("-h", "--help"):
                print usage.__doc__
                sys.exit()
            elif op in ("-v", "--version"):
                print get_version()
                sys.exit()
            else:
                print "Using the wrong way, please view the help information."
    except getopt.GetoptError as err:
        # Unknown option: show usage and exit with an error status.
        print usage.__doc__
        sys.exit(1)
    main()
|
"""prawdditions setup.py."""
import re
from codecs import open
from os import path
from setuptools import find_packages, setup
PACKAGE_NAME = "prawdditions"
HERE = path.abspath(path.dirname(__file__))

# Long description for PyPI comes straight from the README.
with open(path.join(HERE, "README.rst"), encoding="utf-8") as fp:
    README = fp.read()
# Single-source the version from prawdditions/const.py (__version__ = "x.y.z").
with open(path.join(HERE, PACKAGE_NAME, "const.py"), encoding="utf-8") as fp:
    VERSION = re.search('__version__ = "([^"]+)"', fp.read()).group(1)
# Optional dependency groups for `pip install prawdditions[<group>]`.
_LINT_DEPS = ["black", "flake8", "pydocstyle", "sphinx", "sphinx_rtd_theme"]
_TEST_DEPS = [
    "betamax >=0.8, <0.9",
    "betamax-matchers >=0.3.0, <0.5",
    "betamax-serializers >=0.2, <0.3",
    "mock >=0.8",
    "pytest >=2.7.3",
]

# "dev" pulls in everything a contributor needs: pre-commit plus the full
# lint and test tool chains.
extras = {
    "ci": ["coveralls"],
    "dev": ["pre-commit"] + _LINT_DEPS + _TEST_DEPS,
    "lint": list(_LINT_DEPS),
    "test": list(_TEST_DEPS),
}
# Package metadata; VERSION, README and extras are derived above so they
# stay single-sourced with the package itself.
setup(
    name=PACKAGE_NAME,
    author="PokestarFan",
    author_email="sarkaraoyan@gmail.com",
    classifiers=[
        "Development Status :: 4 - Beta",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Topic :: Utilities",
    ],
    description="High-level utilities for PRAW.",
    extras_require=extras,
    install_requires=["praw < 7.0"],
    keywords="praw additions",
    license="Simplified BSD License",
    long_description=README,
    packages=find_packages(exclude=["tests", "tests.*"]),
    package_data={"": ["LICENSE.txt"]},
    test_suite="tests",
    url="https://github.com/praw-dev/prawdditions",
    version=VERSION,
)
|
# noinspection PyUnresolvedReferences
from api.imagestore.base.views import * # noqa: F401,F403
# noinspection PyUnresolvedReferences
from api.imagestore.image.views import * # noqa: F401,F403
|
import json
import sys
import time
import logging
from json.decoder import JSONDecodeError
from kafka_postgres.definitions.keys import MessageJsonKeys
from kafka_postgres.kafka_helper.consumer import Consumer
from kafka_postgres.kafka_helper.producer import Producer
from kafka_postgres.postgresql_helper.exceptions import DataBaseOperationError
from kafka_postgres.postgresql_helper.postgresql_helper import PostgreSqlClient
from kafka_postgres.web_health_monitor.exceptions import WebMonitorException
from kafka_postgres.web_health_monitor.web_monitor import HealthMonitor
log = logging.getLogger("kafka_postgresql_demo")
def start_web_monitoring_database_reporting(kafka_config: dict, postgresql_config: dict) -> None:
    """
    Starts the Web monitoring results to database part.

    It will start the Kafka consumer and PostgreSQL client so it can consume new
    results and report them to the database.

    :param kafka_config: the Kafka configuration (as from config file).
    :param postgresql_config: the PostgreSQL configuration (as from config file).
    """
    # Pre-bind both clients to None so the except handlers below can safely
    # close only what was actually constructed. The original code raised
    # NameError when Consumer()/PostgreSqlClient() themselves failed, masking
    # the real error.
    consumer = None
    db_client = None

    def _close_clients() -> None:
        """Close whichever of the two clients have been created so far."""
        if consumer is not None:
            consumer.close()
        if db_client is not None:
            db_client.close()

    try:
        consumer = Consumer(**kafka_config)
        db_client = PostgreSqlClient(**postgresql_config)
        messages_bulk = []  # rows buffered until the bulk-insert threshold is hit

        def insert_to_table(data: str):
            """helper internal method to act as the lambda function for the consumer read"""
            log.debug("message: %s", data)
            try:
                json_data = json.loads(data)
                # Swap single quotes for double quotes so the stored pattern
                # text stays well-formed downstream.
                json_data["pattern"] = '%s' % json_data["pattern"].replace("'", "\"")
                messages_bulk.append(
                    (json_data[MessageJsonKeys.URL],
                     json_data[MessageJsonKeys.STATUS_CODE],
                     json_data[MessageJsonKeys.STATUS_CODE_OK],
                     json_data[MessageJsonKeys.RESPONSE_TIME_SECS],
                     json_data[MessageJsonKeys.METHOD],
                     json_data[MessageJsonKeys.IS_PATTERN_FOUND],
                     json_data[MessageJsonKeys.PATTERN],
                     json_data[MessageJsonKeys.MATCHES][:255])  # truncated to fit the column
                )
                if len(messages_bulk) >= consumer.bulk_count:
                    db_client.bulk_insert_monitoring_results(messages_bulk)
                    messages_bulk.clear()
                else:
                    log.debug(
                        "Bulk insert limit to web monitoring, not reached yet current %d - required %d .",
                        len(messages_bulk),
                        consumer.bulk_count)
            except JSONDecodeError as json_error:
                # Malformed message: log and skip, keep consuming.
                log.error("Failed to decode message to json: %s.", json_error)

        if consumer.connect() and db_client.connect():
            log.info("Successfully connected to Kafka, starting to consume and process messages.")
            consumer.read(message_handler=lambda consumer_record: insert_to_table(consumer_record.value))
    except KeyError as key_error:
        _close_clients()
        log.error("Configuration value missing from config file: %s", key_error)
        sys.exit(1)
    except DataBaseOperationError as db_error:
        log.info("A Database operation error occurred: %s, will stop run.", db_error)
        sys.exit(1)
    except KeyboardInterrupt:
        log.info("Received Ctrl-C, closing connection and exiting...")
        _close_clients()
        sys.exit(0)
def start_web_monitoring_website_checking(kafka_config: dict, web_monitor_config: dict) -> None:
    """
    Starts the Web monitoring actual checking.

    It will start the Kafka producer and WebMonitor clients so it can perform new
    checks and produce the check results into a Kafka topic.

    :param kafka_config: the Kafka configuration (as from config file).
    :param web_monitor_config: the Web Monitoring configuration (as from config file).
    """
    # Pre-bind to None so the except handlers can close the producer only if
    # it was actually constructed; the original code raised NameError when
    # Producer(**kafka_config) itself failed.
    producer = None
    try:
        producer = Producer(**kafka_config)
        if producer.connect():
            log.info("Successfully connected to Kafka, starting to consume and process messages.")
            health_monitor = HealthMonitor(**web_monitor_config)
            check_counter = 0  # checks performed since the last explicit flush
            while True:
                data = health_monitor.check()
                check_counter += 1
                log.debug("message: %s", json.dumps(data))
                producer.write(json.dumps(data))
                if check_counter >= producer.bulk_count:
                    # Periodically force batched messages out to the broker.
                    producer.client.flush()
                    check_counter = 0
                time.sleep(health_monitor.monitor_interval_in_sec)
    except KeyError as kw_error:
        if producer is not None:
            producer.close()
        log.error("Configuration value missing from config file: %s", kw_error)
        sys.exit(1)
    except WebMonitorException as wm_error:
        if producer is not None:
            producer.close()
        log.error("An Error occurred during web monitoring, details: %s", wm_error)
        sys.exit(1)
    except KeyboardInterrupt:
        log.info("Received Ctrl-C, closing connection and exiting...")
        if producer is not None:
            producer.close()
        sys.exit(0)
|
# -*- coding: utf-8 -*-
"""
Autor: André Pacheco
Email: pacheco.comp@gmail.com
Function to load the CNN models
"""
from effnet import MyEffnet
from densenet import MyDensenet
from mobilenet import MyMobilenet
from resnet import MyResnet
from vggnet import MyVGGNet
from inceptionv4 import MyInceptionV4
from senet import MySenet
from torchvision import models
import pretrainedmodels as ptm
from efficientnet_pytorch import EfficientNet
# Architecture names accepted by set_model.
# NOTE(review): 'googlenet' is listed here but set_model has no branch for it,
# so requesting it currently returns None — confirm whether it should be
# implemented or removed from this list.
_MODELS = ['resnet-50', 'resnet-101', 'densenet-121', 'inceptionv4', 'googlenet', 'vgg-13', 'vgg-16', 'vgg-19',
           'mobilenet', 'efficientnet-b4', 'senet']
def get_norm_and_size (model_name):
    """
    Return the normalisation statistics and input size for a given model.

    :param model_name: architecture name (one of _MODELS).
    :return: tuple (mean, std, [height, width]) to use for preprocessing.
    """
    if model_name == "inceptionv4":
        # InceptionV4 (pretrainedmodels) uses 0.5-centred normalisation and a
        # 299x299 input. The original code returned [229, 229], which is a
        # typo for the canonical 299 (229 is the ImageNet std value, not an
        # Inception input size).
        return [0.5, 0.5, 0.5], [0.5, 0.5, 0.5], [299, 299]
    else:
        # Standard ImageNet statistics and 224x224 input for all other models.
        return [0.485, 0.456, 0.406], [0.229, 0.224, 0.225], [224, 224]
def set_model (model_name, num_class, neurons_reducer_block=0, comb_method=None, comb_config=None, pretrained=True,
               freeze_conv=False, p_dropout=0.5):
    """
    Build one of the supported CNN wrapper models for the given architecture.

    :param model_name: architecture name; must be one of _MODELS.
    :param num_class: number of output classes.
    :param neurons_reducer_block: size of the optional reducer block (0 disables it).
    :param comb_method: feature-combination method forwarded to the wrapper.
    :param comb_config: configuration for comb_method.
    :param pretrained: if True, load ImageNet weights.
    :param freeze_conv: if True, freeze the convolutional backbone.
    :param p_dropout: dropout probability — NOTE(review): accepted but never
        used in this function; confirm whether it should be forwarded.
    :return: the wrapped model instance (None if no branch matched).
    :raises Exception: if model_name is not in _MODELS.
    """
    # Translate the single `pretrained` flag into the two conventions used by
    # torchvision (boolean) and pretrainedmodels ('imagenet' or None).
    if pretrained:
        pre_ptm = 'imagenet'
        pre_torch = True
    else:
        pre_torch = False
        pre_ptm = None
    if model_name not in _MODELS:
        raise Exception("The model {} is not available!".format(model_name))
    # NOTE(review): 'googlenet' passes the check above but has no branch below,
    # so it falls through and None is returned silently — confirm intent.
    model = None
    if model_name == 'resnet-50':
        model = MyResnet(models.resnet50(pretrained=pre_torch), num_class, neurons_reducer_block, freeze_conv,
                         comb_method=comb_method, comb_config=comb_config)
    elif model_name == 'resnet-101':
        model = MyResnet(models.resnet101(pretrained=pre_torch), num_class, neurons_reducer_block, freeze_conv,
                         comb_method=comb_method, comb_config=comb_config)
    elif model_name == 'densenet-121':
        model = MyDensenet(models.densenet121(pretrained=pre_torch), num_class, neurons_reducer_block, freeze_conv,
                           comb_method=comb_method, comb_config=comb_config)
    elif model_name == 'vgg-13':
        model = MyVGGNet(models.vgg13_bn(pretrained=pre_torch), num_class, neurons_reducer_block, freeze_conv,
                         comb_method=comb_method, comb_config=comb_config)
    elif model_name == 'vgg-16':
        model = MyVGGNet(models.vgg16_bn(pretrained=pre_torch), num_class, neurons_reducer_block, freeze_conv,
                         comb_method=comb_method, comb_config=comb_config)
    elif model_name == 'vgg-19':
        model = MyVGGNet(models.vgg19_bn(pretrained=pre_torch), num_class, neurons_reducer_block, freeze_conv,
                         comb_method=comb_method, comb_config=comb_config)
    elif model_name == 'mobilenet':
        model = MyMobilenet(models.mobilenet_v2(pretrained=pre_torch), num_class, neurons_reducer_block, freeze_conv,
                            comb_method=comb_method, comb_config=comb_config)
    elif model_name == 'efficientnet-b4':
        # EfficientNet uses distinct factory functions for pretrained vs fresh weights.
        if pretrained:
            model = MyEffnet(EfficientNet.from_pretrained(model_name), num_class, neurons_reducer_block, freeze_conv,
                             comb_method=comb_method, comb_config=comb_config)
        else:
            model = MyEffnet(EfficientNet.from_name(model_name), num_class, neurons_reducer_block, freeze_conv,
                             comb_method=comb_method, comb_config=comb_config)
    elif model_name == 'inceptionv4':
        model = MyInceptionV4(ptm.inceptionv4(num_classes=1000, pretrained=pre_ptm), num_class, neurons_reducer_block,
                              freeze_conv, comb_method=comb_method, comb_config=comb_config)
    elif model_name == 'senet':
        model = MySenet(ptm.senet154(num_classes=1000, pretrained=pre_ptm), num_class, neurons_reducer_block,
                        freeze_conv, comb_method=comb_method, comb_config=comb_config)
    return model
|
import random
from django.urls import reverse
from faker import Faker
from rest_framework import status
from openbook_common.tests.models import OpenbookAPITestCase
import logging
import json
from openbook_common.tests.helpers import make_user, make_authentication_headers_for_user, \
make_community
logger = logging.getLogger(__name__)
fake = Faker()
class CommunityModeratorsAPITest(OpenbookAPITestCase):
    """
    API tests for the community-moderators collection endpoint:
    listing moderators (GET) and adding a moderator (PUT).
    """

    def test_can_get_community_moderators_if_admin(self):
        """
        should be able to retrieve the community moderators if user is admin of community
        """
        user = make_user()
        headers = make_authentication_headers_for_user(user)
        other_user = make_user()
        community = make_community(creator=other_user)
        community_name = community.name
        user.join_community_with_name(community_name)
        other_user.add_administrator_with_username_to_community_with_name(username=user.username,
                                                                          community_name=community.name)
        amount_of_moderators = 5
        moderators_ids = [
        ]
        for i in range(0, amount_of_moderators):
            community_member = make_user()
            community_member.join_community_with_name(community_name)
            # NOTE(review): passes the user object as `username`; other calls in
            # this file pass `.username` — confirm the helper accepts both.
            other_user.add_moderator_with_username_to_community_with_name(username=community_member,
                                                                          community_name=community.name)
            moderators_ids.append(community_member.pk)
        url = self._get_url(community_name=community.name)
        response = self.client.get(url, **headers)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        response_moderators = json.loads(response.content)
        self.assertEqual(len(response_moderators), len(moderators_ids))
        for response_moderator in response_moderators:
            response_moderator_id = response_moderator.get('id')
            self.assertIn(response_moderator_id, moderators_ids)

    def test_can_get_community_moderators_if_mod(self):
        """
        should be able to retrieve the community moderators if user is moderator of community
        """
        user = make_user()
        headers = make_authentication_headers_for_user(user)
        other_user = make_user()
        community = make_community(creator=other_user)
        community_name = community.name
        user.join_community_with_name(community_name)
        other_user.add_moderator_with_username_to_community_with_name(username=user.username,
                                                                      community_name=community.name)
        amount_of_moderators = 5
        # The requesting user is itself a moderator, so it is expected in the response.
        moderators_ids = [
            user.pk
        ]
        for i in range(0, amount_of_moderators):
            community_member = make_user()
            community_member.join_community_with_name(community_name)
            # NOTE(review): user object passed as `username` (see note above).
            other_user.add_moderator_with_username_to_community_with_name(username=community_member,
                                                                          community_name=community.name)
            moderators_ids.append(community_member.pk)
        url = self._get_url(community_name=community.name)
        response = self.client.get(url, **headers)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        response_moderators = json.loads(response.content)
        self.assertEqual(len(response_moderators), len(moderators_ids))
        for response_moderator in response_moderators:
            response_moderator_id = response_moderator.get('id')
            self.assertIn(response_moderator_id, moderators_ids)

    def test_can_get_community_moderators_if_member(self):
        """
        should be able to retrieve the community moderators if user is member of community
        """
        user = make_user()
        headers = make_authentication_headers_for_user(user)
        other_user = make_user()
        community = make_community(creator=other_user)
        community_name = community.name
        user.join_community_with_name(community_name)
        amount_of_moderators = 5
        moderators_ids = [
        ]
        for i in range(0, amount_of_moderators):
            community_member = make_user()
            community_member.join_community_with_name(community_name)
            # NOTE(review): user object passed as `username` (see note above).
            other_user.add_moderator_with_username_to_community_with_name(username=community_member,
                                                                          community_name=community.name)
            moderators_ids.append(community_member.pk)
        url = self._get_url(community_name=community.name)
        response = self.client.get(url, **headers)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        response_moderators = json.loads(response.content)
        self.assertEqual(len(response_moderators), len(moderators_ids))
        for response_moderator in response_moderators:
            response_moderator_id = response_moderator.get('id')
            self.assertIn(response_moderator_id, moderators_ids)

    def test_cant_get_community_moderators_if_banned(self):
        """
        should not be able to retrieve the community moderators if user has been banned from community
        """
        user = make_user()
        headers = make_authentication_headers_for_user(user)
        community_owner = make_user()
        community = make_community(creator=community_owner)
        community_name = community.name
        user.join_community_with_name(community_name)
        community_owner.ban_user_with_username_from_community_with_name(username=user.username,
                                                                        community_name=community.name)
        amount_of_moderators = 5
        for i in range(0, amount_of_moderators):
            community_member = make_user()
            community_member.join_community_with_name(community_name)
            # NOTE(review): user object passed as `username` (see note above).
            community_owner.add_moderator_with_username_to_community_with_name(username=community_member,
                                                                               community_name=community.name)
        url = self._get_url(community_name=community.name)
        response = self.client.get(url, **headers)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_can_add_community_moderator_if_creator(self):
        """
        should be able to add a community moderator if user is creator of community
        """
        user = make_user()
        headers = make_authentication_headers_for_user(user)
        community = make_community(creator=user, type='P')
        user_to_make_moderator = make_user()
        user_to_make_moderator.join_community_with_name(community_name=community.name)
        url = self._get_url(community_name=community.name)
        response = self.client.put(url, {
            'username': user_to_make_moderator.username
        }, **headers)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertTrue(
            user_to_make_moderator.is_moderator_of_community_with_name(community_name=community.name))

    def test_logs_community_moderator_added(self):
        """
        should create a log when community moderator was added
        """
        user = make_user()
        headers = make_authentication_headers_for_user(user)
        community = make_community(creator=user, type='P')
        moderator_to_add = make_user()
        moderator_to_add.join_community_with_name(community_name=community.name)
        url = self._get_url(community_name=community.name)
        self.client.put(url, {
            'username': moderator_to_add.username
        }, **headers)
        # 'AM' is the action type recorded for "add moderator" log entries.
        self.assertTrue(community.logs.filter(action_type='AM',
                                              source_user=user,
                                              target_user=moderator_to_add).exists())

    def test_can_add_community_moderator_if_admin(self):
        """
        should be able to add a community moderator if user is administrator of community
        """
        user = make_user()
        other_user = make_user()
        headers = make_authentication_headers_for_user(user)
        community = make_community(creator=other_user, type='P')
        user.join_community_with_name(community_name=community.name)
        # NOTE(review): user object passed as `username` (see note above).
        other_user.add_administrator_with_username_to_community_with_name(username=user,
                                                                          community_name=community.name)
        user_to_make_admnistrator = make_user()
        user_to_make_admnistrator.join_community_with_name(community_name=community.name)
        url = self._get_url(community_name=community.name)
        response = self.client.put(url, {
            'username': user_to_make_admnistrator.username
        }, **headers)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertTrue(
            user_to_make_admnistrator.is_moderator_of_community_with_name(community_name=community.name))

    def test_cant_add_community_moderator_if_mod(self):
        """
        should not be able to add a community moderator if user is moderator of community
        """
        user = make_user()
        other_user = make_user()
        headers = make_authentication_headers_for_user(user)
        community = make_community(creator=other_user, type='P')
        user.join_community_with_name(community_name=community.name)
        other_user.add_moderator_with_username_to_community_with_name(username=user.username,
                                                                      community_name=community.name)
        user_to_make_admnistrator = make_user()
        user_to_make_admnistrator.join_community_with_name(community_name=community.name)
        url = self._get_url(community_name=community.name)
        response = self.client.put(url, {
            'username': user_to_make_admnistrator.username
        }, **headers)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertFalse(
            user_to_make_admnistrator.is_moderator_of_community_with_name(community_name=community.name))

    def test_cant_add_community_moderator_if_member(self):
        """
        should not be able to add a community moderator if user is just a member of community
        """
        user = make_user()
        other_user = make_user()
        headers = make_authentication_headers_for_user(user)
        community = make_community(creator=other_user, type='P')
        user.join_community_with_name(community_name=community.name)
        user_to_make_admnistrator = make_user()
        user_to_make_admnistrator.join_community_with_name(community_name=community.name)
        url = self._get_url(community_name=community.name)
        response = self.client.put(url, {
            'username': user_to_make_admnistrator.username
        }, **headers)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertFalse(
            user_to_make_admnistrator.is_moderator_of_community_with_name(community_name=community.name))

    def test_cant_add_community_moderator_if_not_member(self):
        """
        should not be able to add a community moderator if user is not even a member of community
        """
        user = make_user()
        other_user = make_user()
        headers = make_authentication_headers_for_user(user)
        community = make_community(creator=other_user, type='P')
        user_to_make_moderator = make_user()
        url = self._get_url(community_name=community.name)
        response = self.client.put(url, {
            'username': user_to_make_moderator.username
        }, **headers)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertFalse(
            user_to_make_moderator.is_moderator_of_community_with_name(community_name=community.name))

    def test_cant_add_community_moderator_if_admin(self):
        """
        should not be able to add a community moderator if the user is already an admin
        """
        user = make_user()
        headers = make_authentication_headers_for_user(user)
        community = make_community(creator=user, type='P')
        user_to_make_moderator = make_user()
        user_to_make_moderator.join_community_with_name(community_name=community.name)
        user.add_administrator_with_username_to_community_with_name(username=user_to_make_moderator.username,
                                                                    community_name=community.name)
        url = self._get_url(community_name=community.name)
        response = self.client.put(url, {
            'username': user_to_make_moderator.username
        }, **headers)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertFalse(
            user_to_make_moderator.is_moderator_of_community_with_name(community_name=community.name))

    def _get_url(self, community_name):
        # Resolve the collection endpoint for the given community.
        return reverse('community-moderators', kwargs={
            'community_name': community_name
        })
class CommunityModeratorAPITest(OpenbookAPITestCase):
    """Tests for removing a single moderator from a community."""

    def test_can_remove_community_moderator_if_admin(self):
        """
        should be able to remove a community moderator if user is admin of the community
        """
        admin = make_user()
        headers = make_authentication_headers_for_user(admin)
        community = make_community(creator=admin, type='P')
        moderator = make_user()
        moderator.join_community_with_name(community_name=community.name)
        admin.add_moderator_with_username_to_community_with_name(
            username=moderator.username, community_name=community.name)
        response = self.client.delete(
            self._get_url(community_name=community.name, username=moderator.username),
            **headers)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertFalse(
            moderator.is_moderator_of_community_with_name(community_name=community.name))

    def test_logs_community_moderator_removed(self):
        """
        should create a log when community moderator was removed
        """
        admin = make_user()
        headers = make_authentication_headers_for_user(admin)
        community = make_community(creator=admin, type='P')
        moderator = make_user()
        moderator.join_community_with_name(community_name=community.name)
        admin.add_moderator_with_username_to_community_with_name(
            username=moderator.username, community_name=community.name)
        self.client.delete(
            self._get_url(community_name=community.name, username=moderator.username),
            **headers)
        log_exists = community.logs.filter(action_type='RM',
                                           source_user=admin,
                                           target_user=moderator).exists()
        self.assertTrue(log_exists)

    def test_cant_remove_community_moderator_if_mod(self):
        """
        should not be able to remove a community moderator if user is moderator
        """
        acting_moderator = make_user()
        creator = make_user()
        headers = make_authentication_headers_for_user(acting_moderator)
        community = make_community(creator=creator, type='P')
        acting_moderator.join_community_with_name(community.name)
        creator.add_moderator_with_username_to_community_with_name(
            username=acting_moderator.username, community_name=community.name)
        target_moderator = make_user()
        target_moderator.join_community_with_name(community_name=community.name)
        creator.add_moderator_with_username_to_community_with_name(
            username=target_moderator.username, community_name=community.name)
        response = self.client.delete(
            self._get_url(community_name=community.name,
                          username=target_moderator.username),
            **headers)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertTrue(
            target_moderator.is_moderator_of_community_with_name(community_name=community.name))

    def test_cant_remove_community_moderator_if_member(self):
        """
        should not be able to remove a community moderator if user is member
        """
        member = make_user()
        creator = make_user()
        headers = make_authentication_headers_for_user(member)
        community = make_community(creator=creator, type='P')
        member.join_community_with_name(community.name)
        target_moderator = make_user()
        target_moderator.join_community_with_name(community_name=community.name)
        creator.add_moderator_with_username_to_community_with_name(
            username=target_moderator.username, community_name=community.name)
        response = self.client.delete(
            self._get_url(community_name=community.name,
                          username=target_moderator.username),
            **headers)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertTrue(
            target_moderator.is_moderator_of_community_with_name(community_name=community.name))

    def _get_url(self, community_name, username):
        """Build the single community-moderator endpoint URL."""
        return reverse('community-moderator', kwargs={
            'community_name': community_name,
            'community_moderator_username': username
        })
class SearchCommunityModeratorsAPITests(OpenbookAPITestCase):
    """
    SearchCommunityModeratorsAPITests
    """

    def test_can_search_community_moderators_by_name(self):
        """
        should be able to search for community moderators by their name and return 200
        """
        admin = make_user()
        headers = make_authentication_headers_for_user(admin)
        community = make_community(creator=admin)
        for _ in range(5):
            moderator = make_user()
            moderator.join_community_with_name(community_name=community.name)
            admin.add_moderator_with_username_to_community_with_name(
                username=moderator.username, community_name=community.name)
            self._search_and_assert(search_term=moderator.profile.name,
                                    community=community,
                                    moderator=moderator,
                                    admin=admin,
                                    headers=headers)
            admin.remove_moderator_with_username_from_community_with_name(
                username=moderator.username, community_name=community.name)

    def test_can_search_community_moderators_by_username(self):
        """
        should be able to search for community moderators by their username and return 200
        """
        admin = make_user()
        headers = make_authentication_headers_for_user(admin)
        community = make_community(creator=admin)
        for _ in range(5):
            moderator = make_user()
            moderator.join_community_with_name(community_name=community.name)
            admin.add_moderator_with_username_to_community_with_name(
                username=moderator.username, community_name=community.name)
            self._search_and_assert(search_term=moderator.username,
                                    community=community,
                                    moderator=moderator,
                                    admin=admin,
                                    headers=headers)
            admin.remove_moderator_with_username_from_community_with_name(
                username=moderator.username, community_name=community.name)

    def _search_and_assert(self, search_term, community, moderator, admin, headers):
        """Query the endpoint with a random-cased prefix of *search_term* and
        assert only *moderator* (and possibly *admin*, the creator) come back."""
        prefix_length = random.randint(1, len(search_term))
        query = ''.join(
            character.upper() if fake.boolean() else character.lower()
            for character in search_term[0:prefix_length])
        response = self.client.get(self._get_url(community_name=community.name),
                                   {'query': query}, **headers)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        results = json.loads(response.content)
        if len(results) == 1:
            # Our community creator was not retrieved
            self.assertEqual(results[0]['id'], moderator.id)
        else:
            # Our community creator was retrieved too
            for result in results:
                self.assertIn(result['id'], (moderator.id, admin.id))

    def _get_url(self, community_name):
        """Build the search-community-moderators endpoint URL."""
        return reverse('search-community-moderators', kwargs={
            'community_name': community_name,
        })
|
import random
from .behaviour import Behaviour
REGROUP_RANGE = 32
RETREAT_AMT = 16
class Base(Behaviour):
    """Base combat behaviour for a unit: target acquisition, regrouping
    and retreating.

    Operates on `self.unit` and `self.game`, which are provided by the
    `Behaviour` base class (not visible here).
    """

    def complete_init(self):
        """Initialize per-unit behaviour state."""
        # these are pointers. be careful with modifying them.
        self.target_unit = None
        self.reference_entity = None
        self.valid_targets = []
        # max distance from the reference entity for an enemy to count as valid
        self.spread_max = self.unit.entity_spread_max
        # force_* are one-shot external requests; regrouping/retreating are
        # the currently active states acted on each frame in process().
        self.force_regroup = False
        self.regrouping = False
        self.force_retreat = False
        self.retreating = False
        self.retreat_target = None
        # recent tile locations of the leader (trimmed to 50 in process())
        self.position_log = []
        self.check_visibility = False
        self.smart_range_retarget = False
        # override with ranged logic when applicable
        if self.unit.use_ammo:
            self.check_visibility = True

    @property
    def leader(self):
        # First entity serves as the leader; None when the unit is empty.
        return self.unit.entities[0] if len(self.unit.entities) else None

    def find_target(self):
        """
        Find the nearest enemy from a different team and update the target.

        Sets `target_unit` and a random `reference_entity` from it, or
        clears both when no enemy entity exists.
        """
        nearest = [None, 9999999]
        for entity in self.game.combat.all_entities:
            if entity.team != self.unit.team:
                dis = entity.dis(self.unit)
                if dis < nearest[1]:
                    nearest = [entity, dis]
        if nearest[0]:
            self.target_unit = nearest[0].unit
            self.reference_entity = random.choice(nearest[0].unit.entities)
        else:
            self.target_unit = None
            self.reference_entity = None

    def update_valid_targets(self):
        """Rebuild `valid_targets`: target-unit entities within `spread_max`
        of the reference entity."""
        self.valid_targets = []
        if self.target_unit:
            for entity in self.target_unit.entities:
                if entity.dis(self.reference_entity) < self.spread_max:
                    self.valid_targets.append(entity)

    def process(self, dt):
        """Per-frame update: retargeting, leader trail logging and
        regroup/retreat state handling."""
        if len(self.unit.entities):
            # (re)acquire a target when we have none or ours was wiped out
            if (not self.target_unit) or (not self.target_unit.alive):
                self.find_target()
            if self.leader:
                loc = self.game.combat.terrain.px_to_loc(self.leader.pos.copy())
                if loc not in self.position_log:
                    self.position_log.append(loc)
                self.position_log = self.position_log[-50:]
            if self.regrouping or self.retreating:
                # The unit counts as gathered when its bounding box (plus the
                # retreat point, when retreating) fits within REGROUP_RANGE.
                all_x = [e.pos[0] for e in self.unit.entities]
                all_y = [e.pos[1] for e in self.unit.entities]
                if self.retreating:
                    all_x.append(self.retreat_target[0])
                    all_y.append(self.retreat_target[1])
                if (max(all_x) - min(all_x)) < REGROUP_RANGE:
                    if (max(all_y) - min(all_y)) < REGROUP_RANGE:
                        self.regrouping = False
                        self.retreating = False
                        for entity in self.unit.entities:
                            entity.behaviour.state = entity.behaviour.movement_mode
            # update reference entity when it dies
            if self.target_unit and self.target_unit.alive and (not self.reference_entity.alive):
                if len(self.target_unit.entities):
                    self.reference_entity = random.choice(self.target_unit.entities)
                else:
                    self.target_unit = None
            # regroup/retreat since target unit died
            if self.force_regroup or self.force_retreat:
                if self.force_regroup:
                    self.regrouping = True
                if self.force_retreat:
                    self.retreating = True
                    # NOTE(review): indexes RETREAT_AMT entries back into the
                    # leader trail; a shorter log raises IndexError — confirm
                    # the log is always long enough by the time this fires.
                    self.retreat_target = (
                        self.position_log[-RETREAT_AMT][0] * self.game.combat.terrain.tile_size,
                        self.position_log[-RETREAT_AMT][1] * self.game.combat.terrain.tile_size,
                    )
                # NOTE(review): force_regroup/force_retreat are not reset
                # here — presumably cleared by whoever set them; verify.
                for entity in self.unit.entities:
                    entity.behaviour.state = "path"
                    entity.behaviour.target_entity = None
        self.update_valid_targets()
|
from gym.envs.registration import register

# Environment ids follow the pattern 'BitFlipper<n>-v0'
# (formerly documented as 'BitFlipper-n:space_seed').
# Every variant uses the same entry point and space_seed; the episode
# step cap equals the bit count n.
_BIT_FLIPPER_SIZES = (2, 5, 8, 10, 15, 20, 25, 30, 40, 50)

for _n in _BIT_FLIPPER_SIZES:
    register(
        'BitFlipper{}-v0'.format(_n),
        entry_point='gym_BitFlipper.envs:BitFlipperEnv',
        max_episode_steps=_n,
        kwargs={"space_seed": 0, "n": _n},
    )
|
#!/usr/bin/env python3
"""
Exctractor class enables retrieving all nlp features from `sentence`.
Designed for python3 which requires the pattern package to be installed
using the development brach https://github.com/clips/pattern#installation
"""
__author__ = "Ivo Merchiers"
__copyright__ = "Copyright 2017, Knowledge and the Web Project"
__credits__ = ["Brecht Laperre", "Ivo Merchiers", "Rafael Hautekiet"]
__version__ = "1.0.0"
__maintainer__ = "Ivo Merchiers"
__email__ = "ivo.merchiers@student.kuleuven.be"
__status__ = "Development"
from pattern.nl import sentiment, parsetree, lemma, singularize
# import spacy
# nlp = spacy.load('nl')
postags = [u'CC', u'CD', u'DT', u'EX', u'FW', u'IN', u'JJ', u'JJR', u'JJS', u'LS', u'MD', u'NN', u'NNS', u'NNP', u'NNPS', u'PDT', u'POS', u'PRP', u'PRP$',
u'RB', u'RBR', u'RBS', u'RP', u'SYM', u'TO', u'UH', u'VB', u'VBZ', u'VBP', u'VBD', u'VBN', u'VBG', u'WDT', u'WP', u'WP$', u'WRB', u'.', u',', u':', u'(', u')']
class Extractor:
    """
    Extractor class enables retrieving all nlp features from `sentence`.

    The sentence is parsed once in the constructor with pattern.nl's
    `parsetree`; feature methods read from the cached parse tree.
    """

    def __init__(self, sentence):
        """Parse `sentence` and cache the parse tree and POS-tag counts."""
        self.sentence = sentence
        tree = parsetree(sentence, lemmata=True)
        # tree is actually a sentence in term of pattern definitions
        self.tree = tree[0]
        self.pcounts = self.countPosTags()
        # self.ecounts = self.countEntities()

    def __getitem__(self, k):
        """Dictionary-style feature access.

        Known keys: LEMMA_SENT, LEMMA_POS, POS_SENT, LENGTH, POL, SUBJ.
        NOTE(review): unknown keys silently return None; kept for backward
        compatibility with existing callers.
        """
        if k == "LEMMA_SENT":
            return self.lemmataSentence()
        elif k == "LEMMA_POS":
            return self.lemmataPosTagSentence()
        elif k == "POS_SENT":
            return self.posTagSentence()
        elif k == "LENGTH":
            return self.length()
        elif k == "POL":
            return self.polarity()
        elif k == "SUBJ":
            return self.subjectivity()

    def change(self, sentence):
        """Switch this instance to a new sentence and re-parse it.

        NOTE(review): `pcounts` is not refreshed here, so it keeps the
        counts of the previous sentence — confirm this is intended.
        """
        self.sentence = sentence
        tree = parsetree(sentence, lemmata=True)
        self.tree = tree[0]

    def length(self):
        # `stop` presumably equals the token count of the parsed sentence —
        # TODO confirm against pattern's Sentence API.
        return self.tree.stop

    def sent(self):
        """
        :returns: (polarity, subjectivity)
        """
        return sentiment(self.sentence)

    def polarity(self):
        """Return the sentence polarity rounded to 4 decimals."""
        return round(self.sent()[0], 4)

    def subjectivity(self):
        """Return the sentence subjectivity rounded to 4 decimals."""
        return round(self.sent()[1], 4)

    def parsedSentence(self):
        """Return the parsed words as a list of strings."""
        return [str(word) for word in self.tree.words]

    def lemmataSentence(self):
        """Return the sentence with every word replaced by its lemma."""
        return ' '.join(self.tree.lemmata)

    def posTagSentence(self):
        """Return the sentence as a space separated string of POS tags."""
        return ' '.join(word.type for word in self.tree.words)

    def lemmataPosTagSentence(self):
        """Return space separated 'lemma_POS' tokens for the sentence."""
        lemmas = list(self.tree.lemmata)
        tags = [word.type for word in self.tree.words]
        return ' '.join(lem + "_" + tag for lem, tag in zip(lemmas, tags))

    def countPosTags(self):
        """Return a dict mapping every tag in `postags` to its frequency."""
        tags = [word.type for word in self.tree.words]
        return {tag: tags.count(tag) for tag in postags}

    def nounChunks(self):
        # BUG FIX: `Error` was an undefined name, so calling this raised
        # NameError instead of the intended "not implemented" signal.
        raise NotImplementedError('Not implemented')
# def countEntities(self):
# doc = nlp(self.sentence)
# ecounts = {'PER': 0, 'LOC': 0, 'ORG': 0, 'MISC': 0}
# for ent in doc.ents:
# ecounts[ent.label_] += 1
# return ecounts
# Execute following test, only if this file is ran explicitly
if __name__ == "__main__":
    sentence = u'Ivo Merchiers zegt dat hij graag melk drinkt'
    # BUG FIX: the class defined above is `Extractor`; `featureExtractor`
    # was an undefined name and raised NameError on direct execution.
    fe = Extractor(sentence)
    print(fe.lemmataPosTagSentence(), fe.posTagSentence())
|
# coding=utf8
import asyncio
import json
import logging
import sys
from datetime import datetime
from pathlib import Path
from shutil import rmtree, which
from tempfile import gettempdir
from typing import Any, List, Optional, Union
import click
from black_primer import __version__, lib
# If our environment has uvloop installed lets use it
try:
import uvloop
uvloop.install()
except ImportError:
pass
DEFAULT_CONFIG = Path(__file__).parent / "primer.json"
_timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
DEFAULT_WORKDIR = Path(gettempdir()) / f"primer.{_timestamp}"
LOG = logging.getLogger(__name__)
def _handle_debug(
    ctx: Optional[click.core.Context],
    param: Optional[Union[click.core.Option, click.core.Parameter]],
    debug: Union[bool, int, str],
) -> Union[bool, int, str]:
    """Configure root logging: DEBUG when --debug was passed, INFO otherwise."""
    logging.basicConfig(
        format="[%(asctime)s] %(levelname)s: %(message)s (%(filename)s:%(lineno)d)",
        level=logging.DEBUG if debug else logging.INFO,
    )
    return debug
def load_projects(config_path: Path) -> List[str]:
    """Return the sorted project names from *config_path*'s "projects" mapping."""
    with open(config_path) as config:
        config_data = json.load(config)
    return sorted(config_data["projects"])
# Unfortunately does import time file IO - but appears to be the only
# way to get `black-primer --help` to show projects list
# (the --projects option below needs the names at decoration time).
DEFAULT_PROJECTS = load_projects(DEFAULT_CONFIG)
def _projects_callback(
    ctx: click.core.Context,
    param: Optional[Union[click.core.Option, click.core.Parameter]],
    projects: str,
) -> List[str]:
    """Validate the comma separated --projects value against the config file."""
    requested = set(projects.split(","))
    if str(DEFAULT_CONFIG) == ctx.params["config"]:
        available = set(DEFAULT_PROJECTS)
    else:
        available = set(load_projects(ctx.params["config"]))
    missing = requested - available
    if missing:
        LOG.error(f"Projects not found: {missing}. Available: {available}")
    return sorted(requested & available)
async def async_main(
    config: str,
    debug: bool,
    keep: bool,
    long_checkouts: bool,
    no_diff: bool,
    projects: List[str],
    rebase: bool,
    workdir: str,
    workers: int,
) -> int:
    """Create the work dir, run lib.process_queue over *projects*, clean up.

    Returns process_queue's result as the exit code, or -1 when the
    `black` executable is not on PATH.
    """
    work_path = Path(workdir)
    if not work_path.exists():
        LOG.debug(f"Creating {work_path}")
        work_path.mkdir()
    if not which("black"):
        LOG.error("Can not find 'black' executable in PATH. No point in running")
        return -1
    try:
        ret_val = await lib.process_queue(
            config,
            work_path,
            workers,
            projects,
            keep,
            long_checkouts,
            rebase,
            no_diff,
        )
        return int(ret_val)
    finally:
        # Remove the checkout directory unless --keep was requested.
        if not keep and work_path.exists():
            LOG.debug(f"Removing {work_path}")
            rmtree(work_path, onerror=lib.handle_PermissionError)
@click.command(context_settings={"help_option_names": ["-h", "--help"]})
@click.option(
    "-c",
    "--config",
    default=str(DEFAULT_CONFIG),
    type=click.Path(exists=True),
    show_default=True,
    help="JSON config file path",
    # Eager - because config path is used by other callback options
    is_eager=True,
)
@click.option(
    "--debug",
    is_flag=True,
    callback=_handle_debug,
    show_default=True,
    help="Turn on debug logging",
)
@click.option(
    "-k",
    "--keep",
    is_flag=True,
    show_default=True,
    help="Keep workdir + repos post run",
)
@click.option(
    "-L",
    "--long-checkouts",
    is_flag=True,
    show_default=True,
    help="Pull big projects to test",
)
@click.option(
    "--no-diff",
    is_flag=True,
    show_default=True,
    help="Disable showing source file changes in black output",
)
@click.option(
    "--projects",
    default=",".join(DEFAULT_PROJECTS),
    callback=_projects_callback,
    show_default=True,
    help="Comma separated list of projects to run",
)
@click.option(
    "-R",
    "--rebase",
    is_flag=True,
    show_default=True,
    help="Rebase project if already checked out",
)
@click.option(
    "-w",
    "--workdir",
    default=str(DEFAULT_WORKDIR),
    type=click.Path(exists=False),
    show_default=True,
    help="Directory path for repo checkouts",
)
@click.option(
    "-W",
    "--workers",
    default=2,
    type=int,
    show_default=True,
    help="Number of parallel worker coroutines",
)
@click.version_option(version=__version__)
@click.pass_context
def main(ctx: click.core.Context, **kwargs: Any) -> None:
    """primer - prime projects for blackening... 🏴"""
    # kwargs carries every click option above; forwarded to async_main().
    LOG.debug(f"Starting {sys.argv[0]}")
    # TODO: Change to asyncio.run when Black >= 3.7 only
    loop = asyncio.get_event_loop()
    try:
        # Exit the process with async_main's return code via click.
        ctx.exit(loop.run_until_complete(async_main(**kwargs)))
    finally:
        loop.close()


if __name__ == "__main__":  # pragma: nocover
    main()
|
"""
You can remove this test after writing your own tests.
"""
def test_dummy():
    """Placeholder test confirming the test runner is wired up."""
    assert True
|
import os

from setuptools import find_packages, setup

import versioneer

# Read the long description from README.md when present (source checkout);
# fall back to an empty string so setup() still works without the file.
readme_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "README.md")
if os.path.exists(readme_path):
    # BUG FIX: the original rebound the same name `readMeFile` to both the
    # path and the open file object, shadowing the path inside the `with`.
    with open(readme_path, encoding="utf-8") as readme_file:
        long_description = readme_file.read()
else:
    long_description = ""

setup(
    name="docmaker",
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    author="Steve McMaster",
    author_email="mcmaster@hurricanelabs.com",
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    url="https://github.com/HurricaneLabs/docmaker",
    description="A PDF generator",
    long_description=long_description,
    install_requires=[
        "boto3",
        "defusedxml",
        "docx-mailmerge",
        "docxcompose",
        "falcon",
        "jinja2",
        "multipart",
        "python-dateutil",
        "python-frontmatter",
        # "pypandoc",
        "pypandoc @ git+https://github.com/mcm/pypandoc#egg=pypandoc",
        "requests",
        "ruamel.yaml",
        "toposort",
        "werkzeug"
    ],
    entry_points={
        "console_scripts": [
            "docmaker = docmaker:main",
        ]
    },
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3 :: Only",
        "Development Status :: 5 - Production/Stable",
    ],
    # NOTE(review): `bugtrack_url` is not a supported setuptools keyword and
    # is ignored (with a warning) by modern setuptools — consider moving it
    # to `project_urls`.
    bugtrack_url="https://github.com/HurricaneLabs/docmaker/issues",
)
|
# Generated by Django 3.2.8 on 2021-11-26 01:34
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds image/nutrition fields to the
    # menu app. Do not hand-edit the operations; create a new migration
    # for further schema changes.

    dependencies = [
        ('menu', '0002_recipe_fat'),
    ]

    operations = [
        migrations.AddField(
            model_name='category',
            name='image',
            field=models.URLField(blank=True),
        ),
        migrations.AddField(
            model_name='recipe',
            name='carbohydrates',
            field=models.FloatField(blank=True, default=0),
        ),
        migrations.AddField(
            model_name='recipe',
            name='protein',
            field=models.FloatField(blank=True, default=0),
        ),
        migrations.AlterField(
            model_name='recipe',
            name='image',
            field=models.URLField(blank=True),
        ),
    ]
|
from datetime import datetime
from mycroft import MycroftSkill, intent_handler
import os
from yeelight import *
class Yeelight(MycroftSkill):
    """Mycroft skill controlling a single Yeelight smart bulb over the LAN."""

    def __init__(self):
        MycroftSkill.__init__(self)

    def initialize(self):
        """Connect to the bulb, build the demo color flow and color table."""
        # TODO(review): the bulb address is hard-coded — consider reading it
        # from the skill settings so other installs can configure their bulb.
        self.bulb = Bulb("192.168.1.60")
        self.log.info("Yeelight Bulb initilized")
        self.state = self.bulb.last_properties
        self.log.debug(f"Bulb properties: {self.state}")
        # Red -> green -> blue loop used by "flow mode".
        transitions = [
            RGBTransition(255, 0, 0),
            SleepTransition(400),
            RGBTransition(0, 255, 0),
            SleepTransition(400),
            RGBTransition(0, 0, 255),
            SleepTransition(400),
        ]
        self.flow = Flow(0, Flow.actions.recover, transitions)
        self.populate_colors_dict()
        # BUG FIX: always define turned_on_time. It used to be created only
        # when the bulb was already on at startup, so handle_turned_on()
        # could raise AttributeError. None means "on-time unknown".
        self.turned_on_time = None
        self.bulb.get_properties()
        if self.bulb.last_properties["power"] == "on":
            self.turned_on_time = datetime.now()

    def populate_colors_dict(self):
        """Load name -> (r, g, b) mappings from the bundled colors.csv."""
        self.colors = {}
        with open(
            os.path.dirname(os.path.abspath(__file__)) + "/colors.csv", "r"
        ) as csv:
            for line in csv.readlines():
                line = line.strip("\n")
                values = line.split(",")
                # Normalize color names so spoken utterances match keys.
                name = values[0].replace("(", "")
                name = name.replace(")", "")
                name = name.replace("/", "")
                name = name.replace("-", " ")
                name = name.replace("'", "")
                name = name.lower()
                red = int(values[1])
                green = int(values[2])
                blue = int(values[3])
                self.colors[name] = (red, green, blue)

    @intent_handler("on.intent")
    def handle_bulb_on(self, message):
        """Turn the bulb on and record when it happened."""
        self.bulb.get_properties()
        self.log.info("Turning bulb on")
        self.speak_dialog("in.progress")
        self.bulb.turn_on()
        self.turned_on_time = datetime.now()

    @intent_handler("off.intent")
    def handle_bulb_off(self, message):
        """Turn the bulb off."""
        self.bulb.get_properties()
        self.log.info("Turning bulb off")
        self.speak_dialog("in.progress")
        self.bulb.turn_off()

    @intent_handler("change.color.intent")
    def handle_change_color(self, message):
        """Set the bulb to a named color from the colors table."""
        self.bulb.get_properties()
        color = message.data.get("color")
        self.log.info(f"Changing color to {color}")
        try:
            rgb = self.colors[color]
            self.speak(f"Changing color to {color}")
            self.bulb.set_rgb(rgb[0], rgb[1], rgb[2])
            self.color = color
        except KeyError as e:
            self.speak(f"Color {color} does not exist!")

    @intent_handler("change.intensity.intent")
    def handle_change_intensity(self, message):
        """Set brightness; 0 percent turns the bulb off entirely."""
        self.bulb.get_properties()
        percent = int(message.data.get("percent").rstrip("%"))
        self.log.info(f"Changing intensity to {percent} percent")
        self.speak(f"Changing intensity to {percent} percent")
        if percent == 0:
            self.bulb.turn_off()
        else:
            if self.bulb.last_properties["power"] == "off":
                self.bulb.turn_on()
                self.turned_on_time = datetime.now()
            self.bulb.set_brightness(percent)

    @intent_handler("flow.mode.intent")
    def handle_activate_flow(self, message):
        """Start the looping RGB color flow."""
        self.bulb.get_properties()
        self.log.info("Activating flow mode")
        self.speak("Activating flow mode")
        if self.bulb.last_properties["power"] == "off":
            self.bulb.turn_on()
            self.turned_on_time = datetime.now()
        self.bulb.start_flow(self.flow)
        self.color = 'flowing'

    @intent_handler("normal.mode.intent")
    def handle_activate_normal(self, message):
        """Return the bulb to plain white light in normal power mode."""
        self.bulb.get_properties()
        self.log.info("Activating normal mode")
        self.speak("Activating normal mode")
        self.bulb.set_rgb(255, 255, 255)
        self.bulb.set_power_mode(PowerMode.NORMAL)
        self.color = 'white'

    @intent_handler("intensity.state.intent")
    def handle_state_intensity(self, message):
        """Speak the current brightness."""
        self.bulb.get_properties()
        self.log.info("Getting bulb intensity")
        if self.bulb.last_properties["power"] == "off":
            # BUG FIX: return after reporting 0 percent. Previously the
            # method fell through and spoke the stored brightness as well.
            self.speak("The bulb intensity is at 0 percent")
            return
        self.speak(
            f'The bulb intensity is at {self.bulb.last_properties["bright"]} percent'
        )

    @intent_handler("mode.state.intent")
    def handle_state_mode(self, message):
        """Speak whether the bulb is in flow, color or normal mode."""
        self.bulb.get_properties()
        self.log.info("Getting bulb mode")
        is_flowing = True if self.bulb.last_properties["flowing"] == "1" else False
        # 16777215 == 0xFFFFFF, i.e. plain white.
        is_color = False if self.bulb.last_properties["rgb"] == "16777215" else True
        if is_flowing:
            self.speak("The bulb is in flow mode")
        elif is_color:
            self.speak("The bulb is in color mode")
        else:
            self.speak("The bulb is in normal mode")

    @intent_handler("color.state.intent")
    def handle_state_color(self, message):
        """Speak the last color this skill set; defaults to white."""
        self.bulb.get_properties()
        self.log.info("Getting bulb color")
        try:
            self.speak(f"The color of the bulb is {self.color}")
        except AttributeError:
            self.speak(f"The color of the bulb is white")

    @intent_handler("state.intent")
    def handle_state(self, message):
        """Speak a full status report (uptime, color, mode, intensity)."""
        self.bulb.get_properties()
        self.log.info("Getting bulb general state")
        self.handle_turned_on(message)
        self.handle_state_color(message)
        self.handle_state_mode(message)
        self.handle_state_intensity(message)

    @intent_handler("time.turned.on.intent")
    def handle_turned_on(self, message):
        """Speak for how long the bulb has been on."""
        self.bulb.get_properties()
        self.log.info("Getting bulb general state")
        if self.turned_on_time is None:
            # Robustness fix: no recorded switch-on time (bulb was off when
            # the skill loaded and has not been turned on through it since).
            self.speak("I do not know when the bulb was turned on")
            return
        now = datetime.now()
        timedelta = now - self.turned_on_time
        seconds = int(timedelta.total_seconds())
        minutes = int(seconds / 60)
        suffix = 's' if minutes > 1 else ''
        if seconds < 60:
            self.speak(f'The bulb has been on for {seconds} seconds')
        else:
            self.speak(f'The bulb has been on for {minutes} minute{suffix} and {seconds % 60} seconds')
def create_skill():
    """Factory used by Mycroft core to instantiate this skill."""
    skill = Yeelight()
    return skill
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    # @param head, a ListNode
    # @return a ListNode
    def deleteDuplicates(self, head):
        """Return a copy of the sorted list *head* with consecutive
        duplicate values collapsed to a single node."""
        if not head:
            return None
        new_head = ListNode(head.val)
        tail = new_head
        node = head.next
        while node:
            if node.val != tail.val:
                tail.next = ListNode(node.val)
                tail = tail.next
            node = node.next
        return new_head
|
from flask_restful import Resource, reqparse
from flask import jsonify
class Add(Resource):
    """GET resource returning {"result": operand1 + operand2} as JSON."""

    def get(self):
        """Parse two required integer query parameters and return their sum."""
        parser = reqparse.RequestParser(bundle_errors=True)
        # BUG FIX: the operand1 help text was missing the sentence break and
        # the space before {error_msg}; now consistent with operand2.
        parser.add_argument('operand1', type=int, required=True,
                            help='operand1 is required. The detail error is {error_msg}')
        parser.add_argument('operand2', type=int, required=True,
                            help='operand2 is required. The detail error is {error_msg}')
        args = parser.parse_args()
        operand1 = args['operand1']
        operand2 = args['operand2']
        return jsonify({'result': operand1 + operand2})
class Sub(Resource):
    """GET resource returning {"result": operand1 - operand2} as JSON."""

    def get(self):
        """Parse two required integer query parameters and return their difference."""
        parser = reqparse.RequestParser(bundle_errors=True)
        # BUG FIX: the operand1 help text was missing the sentence break and
        # the space before {error_msg}; now consistent with operand2.
        parser.add_argument('operand1', type=int, required=True,
                            help='operand1 is required. The detail error is {error_msg}')
        parser.add_argument('operand2', type=int, required=True,
                            help='operand2 is required. The detail error is {error_msg}')
        args = parser.parse_args()
        operand1 = args['operand1']
        operand2 = args['operand2']
        return jsonify({'result': operand1 - operand2})
|
# ------------------------------------------------------------------------------
# CodeHawk Binary Analyzer
# Author: Henny Sipma
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016-2020 Kestrel Technology LLC
# Copyright (c) 2020 Henny Sipma
# Copyright (c) 2021 Aarno Labs LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
from typing import cast, List, Sequence, TYPE_CHECKING
from chb.app.InstrXData import InstrXData
from chb.invariants.XVariable import XVariable
from chb.invariants.XXpr import XXpr
import chb.simulation.SimUtil as SU
import chb.simulation.SimValue as SV
from chb.util.IndexedTable import IndexedTableValue
from chb.x86.X86DictionaryRecord import x86registry
from chb.x86.X86Opcode import X86Opcode, simplify_result
from chb.x86.X86Operand import X86Operand
if TYPE_CHECKING:
from chb.x86.X86Dictionary import X86Dictionary
from chb.x86.simulation.X86SimulationState import X86SimulationState
@x86registry.register_tag("imul", X86Opcode)
class X86IMul(X86Opcode):
    """IMUL dst, src, imm .

    Three-operand signed multiply.

    args[0]: size (in bytes)
    args[1]: index of dst in x86dictionary
    args[2]: index of src1 in x86dictionary
    args[3]: index of src2 in x86dictionary
    """

    def __init__(
            self,
            x86d: "X86Dictionary",
            ixval: IndexedTableValue) -> None:
        X86Opcode.__init__(self, x86d, ixval)

    @property
    def size(self) -> int:
        """Operand size in bytes."""
        return int(self.args[0])

    @property
    def dst_operand(self) -> X86Operand:
        """Destination operand (receives the truncated product)."""
        return self.x86d.operand(self.args[1])

    @property
    def src1_operand(self) -> X86Operand:
        """First source operand."""
        return self.x86d.operand(self.args[2])

    @property
    def src2_operand(self) -> X86Operand:
        """Second source operand (immediate in the three-operand form)."""
        return self.x86d.operand(self.args[3])

    @property
    def operands(self) -> Sequence[X86Operand]:
        return [self.dst_operand, self.src1_operand, self.src2_operand]

    def annotation(self, xdata: InstrXData) -> str:
        """data format a:vxxxx

        vars[0]: lhs
        xprs[0]: rhs1
        xprs[1]: rhs2
        xprs[2]: product (syntactic)
        xprs[3]: product (simplified)

        NOTE(review): the code below reads xprs[3]/xprs[4] and args[3]/args[4],
        which does not line up with the index layout documented above —
        confirm the actual xdata layout emitted for this opcode.
        """
        lhs = str(xdata.vars[0])
        rhs = xdata.xprs[3]
        rrhs = xdata.xprs[4]
        # simplify_result renders "rhs" or "rhs (= rrhs)" depending on whether
        # simplification changed the expression.
        xrhs = simplify_result(xdata.args[3], xdata.args[4], rhs, rrhs)
        return lhs + ' = ' + xrhs

    def lhs(self, xdata: InstrXData) -> List[XVariable]:
        """Variables assigned by this instruction."""
        return xdata.vars

    def rhs(self, xdata: InstrXData) -> List[XXpr]:
        """Expressions read by this instruction."""
        return xdata.xprs

    # --------------------------------------------------------------------------
    # Performs a signed multiplication of two operands. When an immediate value
    # is used as an operand, it is sign-extended to the length of the destination
    # operand format. The result is truncated to the length of the destination
    # before it is stored in the destination register.
    #
    # The CF and OF flags are set when significant bit (including the sign bit)
    # are carried into the upper half of the result. The CF and OF flags are
    # cleared when the result (including the sign bit) fits exactly in the lower
    # half of the result.
    #
    # Flags affected:
    # For the one operand form of the instruction, the CF and OF flags are set
    # when signif- icant bits are carried into the upper half of the result and
    # cleared when the result fits exactly in the lower half of the result. For
    # the two- and three-operand forms of the instruction, the CF and OF flags
    # are set when the result must be truncated to fit in the destination operand
    # size and cleared when the result fits exactly in the destination operand
    # size. The SF, ZF, AF, and PF flags are undefined.
    # --------------------------------------------------------------------------
    def simulate(self, iaddr: str, simstate: "X86SimulationState") -> None:
        """Simulate dst := src1 * sign_extend(src2) on literal doubleword values.

        Raises CHBSimError when either source is not a defined literal
        doubleword (only that case is modeled here).
        """
        src1op = self.src1_operand
        src2op = self.src2_operand
        dstop = self.dst_operand
        src1val = simstate.get_rhs(iaddr, src1op)
        src2val = simstate.get_rhs(iaddr, src2op)
        if (
                src1val.is_literal
                and src1val.is_defined
                and src1val.is_doubleword
                and src2val.is_literal
                and src2val.is_defined
                and src2val.is_doubleword):
            src1val = cast(SV.SimDoubleWordValue, src1val)
            src2val = cast(SV.SimDoubleWordValue, src2val)
            # The second source (immediate) is sign-extended to the operand size.
            src2val = src2val.sign_extend(self.size)
            result = src1val.mul(src2val)
            lowresult = result.lowhalf
            highresult = result.highhalf
            # Truncate to the destination width when the full product is wider.
            if dstop.size < (result.width // 8):
                simstate.set(iaddr, dstop, lowresult)
            else:
                simstate.set(iaddr, dstop, result)
            # CF/OF signal that significant bits spilled into the upper half.
            simstate.update_flag(iaddr, 'CF', not highresult.is_zero)
            simstate.update_flag(iaddr, 'OF', not highresult.is_zero)
            # SF/ZF/PF are architecturally undefined after IMUL.
            simstate.undefine_flag(iaddr, 'SF')
            simstate.undefine_flag(iaddr, 'ZF')
            simstate.undefine_flag(iaddr, 'PF')
        else:
            raise SU.CHBSimError(
                simstate,
                iaddr,
                ("Mul values are not literal: "
                 + str(src1val)
                 + ", "
                 + str(src2val)))
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class Attention(nn.Module):
    """Multi-head additive attention over a set of memory slots.

    Each query vector is split into per-head queries, scored against every
    memory slot via a learned logit layer, and the probability-weighted sum
    of memory is reduced back to ``embedding_dim`` features.

    Assumes query/context/memory all carry ``embedding_dim`` features
    (shapes: (batch, num_obj, dim) and (batch, num_slots, dim)).
    """

    def __init__(self, embedding_dim=64, num_heads=1, softmax_temperature=1.0):
        super().__init__()
        # Projects each query into `num_heads` separate head vectors.
        self.fc_createheads = nn.Linear(embedding_dim, num_heads * embedding_dim)
        # Scores a tanh(context + query) pair with a single scalar logit.
        self.fc_logit = nn.Linear(embedding_dim, 1)
        # Folds the concatenated heads back down to `embedding_dim`.
        self.fc_reduceheads = nn.Linear(num_heads * embedding_dim, embedding_dim)
        # Learnable softmax temperature (higher -> flatter attention weights).
        self.softmax_temperature = nn.Parameter(torch.tensor(softmax_temperature))
        self.activation_fnx = F.leaky_relu

    def forward(self, query, context, memory):
        """Return attended features of shape (batch, num_obj, embedding_dim)."""
        n_batch, n_obj, feat = query.size()
        # Head count is recovered from the projection width.
        n_heads = self.fc_createheads.out_features // feat
        # (B, O, H*F) -> (B, O, H, F) -> broadcast over slots: (B, O, M, H, F)
        q = self.fc_createheads(query).view(n_batch, n_obj, n_heads, feat)
        q = q.unsqueeze(2).expand(-1, -1, memory.size(1), -1, -1)
        # Context laid out to match the query: (B, O, M, H, F).
        c = context.unsqueeze(1).unsqueeze(3).expand_as(q)
        # Additive-attention logits, softmaxed over the slot dimension.
        logits = self.fc_logit(torch.tanh(c + q))
        probs = F.softmax(logits / self.softmax_temperature, dim=2)
        # Memory broadcast to (B, O, M, H, F), weighted-summed over slots M.
        mem = memory.unsqueeze(1).unsqueeze(3).expand(-1, n_obj, -1, n_heads, -1)
        heads = (mem * probs).sum(2).squeeze(2)
        heads = self.activation_fnx(heads)
        # Concatenate heads and reduce back to the embedding size.
        return self.fc_reduceheads(heads.view(n_batch, n_obj, n_heads * feat))
class AttentiveGraphToGraph(nn.Module):
    """One attention-based message-passing step over a fully-connected graph.

    Every vertex attends to every other vertex (1-hop in a complete graph).
    Masking and layer-norm were dropped in an earlier revision; the
    ``layer_norm`` argument is retained only for interface compatibility.
    """

    def __init__(self, embedding_dim=64, num_heads=1, layer_norm=True):
        super().__init__()
        # One fused projection yields the query, context and memory slices.
        self.fc_qcm = nn.Linear(embedding_dim, 3 * embedding_dim)
        self.attention = Attention(embedding_dim, num_heads=num_heads)

    def forward(self, vertices):
        """vertices: (batch, num_vertices, embedding_dim) -> same shape."""
        fused = self.fc_qcm(vertices)
        query, context, memory = fused.chunk(3, dim=-1)
        return self.attention(query, context, memory)
class AttentiveGraphPooling(nn.Module):
    """Pools a variable-size vertex set into one embedding per batch element
    using a single learned, input-independent attention query."""

    def __init__(self):
        super().__init__()
        self.embedding_dim = 64
        self.init_w = 3e-3
        self.num_heads = 1
        # Learned read-out query, initialised uniformly in [-init_w, init_w].
        self.input_independent_query = nn.Parameter(torch.Tensor(self.embedding_dim))
        self.input_independent_query.data.uniform_(-self.init_w, self.init_w)
        self.attention = Attention(embedding_dim=self.embedding_dim, num_heads=self.num_heads)

    def forward(self, vertices):
        """vertices: (batch, num_obj, embedding_dim) -> (batch, 1, embedding_dim)."""
        n_batch, _n_obj, _feat = vertices.size()
        # Broadcast the single learned query across the batch: (B, 1, D).
        query = self.input_independent_query.unsqueeze(0).unsqueeze(0).expand(n_batch, 1, -1)
        # The vertex set serves as both context and memory.
        return self.attention(query, vertices, vertices)
class GraphPropagation(nn.Module):
    """Stack of attentive message-passing blocks with residual connections."""

    def __init__(self):
        super().__init__()
        self.num_query_heads = 1
        self.num_relational_blocks = 3
        self.embedding_dim = 64
        self.activation_fnx = F.leaky_relu
        self.graph_module_list = nn.ModuleList(
            [AttentiveGraphToGraph() for _ in range(self.num_relational_blocks)])

    def forward(self, vertices):
        """vertices: (batch, num_obj, embedding_dim) -> same shape."""
        out = vertices
        for block in self.graph_module_list:
            # Residual update followed by the non-linearity.
            out = self.activation_fnx(out + block(out))
        return out
class actor_ReNN(nn.Module):
    """Graph-attention actor network: maps a flat observation to an action."""

    def __init__(self, env_params):
        super().__init__()
        self.goal_size = env_params['goal_size']
        self.obj_obs_size = env_params['obj_obs_size']
        self.robot_obs_size = env_params['robot_obs_size']
        # Number of trailing observation entries to drop before reshaping.
        self.ignore_goal_size = env_params['ignore_goal_size']
        # Per-object vertex encoder.
        self.mlp_in = nn.Sequential(
            nn.Linear(self.robot_obs_size + self.obj_obs_size + self.goal_size, 64),
        )
        self.graph_propagation = GraphPropagation()
        self.read_out = AttentiveGraphPooling()
        # Action head applied to the pooled graph embedding.
        self.mlp_out = nn.Sequential(
            nn.Linear(64, 64),
            nn.ReLU(),
            nn.Linear(64, 64),
            nn.ReLU(),
            nn.Linear(64, 64),
            nn.ReLU(),
            nn.Linear(64, env_params['action'])
        )

    def forward(self, obs):
        """Return the action tensor for a batch of flat observations."""
        per_object = self.preprocess(obs)
        vertices = self.mlp_in(per_object)
        embeddings = self.graph_propagation(vertices)
        pooled = self.read_out(vertices=embeddings).squeeze(1)
        return self.mlp_out(pooled)

    def preprocess(self, x):
        """Reshape a flat observation batch into per-object feature rows.

        x: (batch, robot_obs + num_obj*obj_obs + num_obj*goal [+ ignored tail])
        returns: (batch, num_obj, robot_obs + obj_obs + goal)
        """
        if self.ignore_goal_size > 0:
            # Drop the unused tail of the observation vector.
            x = x[..., :-self.ignore_goal_size]
        n_batch, obs_size = x.shape
        assert (obs_size-self.robot_obs_size) % (self.obj_obs_size + self.goal_size) == 0, \
            f'Shape ERROR! obs_size{obs_size}, robot{self.robot_obs_size}, obj&goal{self.obj_obs_size+self.goal_size}'
        num_obj = (obs_size - self.robot_obs_size) // (self.obj_obs_size + self.goal_size)
        # Robot state is shared by all objects: tile it once per object.
        robot = x[:, :self.robot_obs_size].repeat(1, num_obj).reshape(n_batch, num_obj, self.robot_obs_size)
        objs = x[:, self.robot_obs_size:self.robot_obs_size + self.obj_obs_size * num_obj] \
            .reshape(n_batch, num_obj, self.obj_obs_size)
        goals = x[:, self.robot_obs_size + self.obj_obs_size * num_obj:] \
            .reshape(n_batch, num_obj, self.goal_size)
        return torch.cat((robot, objs, goals), dim=-1)
class critic_ReNN(nn.Module):
    """Graph-attention critic: scores a (flat observation, action) pair."""

    def __init__(self, env_params):
        super().__init__()
        self.goal_size = env_params['goal_size']
        self.obj_obs_size = env_params['obj_obs_size']
        self.robot_obs_size = env_params['robot_obs_size']
        # Number of trailing observation entries to drop before reshaping.
        self.ignore_goal_size = env_params['ignore_goal_size']
        # Vertex encoder; the action is concatenated onto every object row.
        self.mlp_in = nn.Sequential(
            nn.Linear(env_params['action'] + self.robot_obs_size + self.obj_obs_size + self.goal_size, 64),
        )
        self.graph_propagation = GraphPropagation()
        self.read_out = AttentiveGraphPooling()
        # Scalar Q-value head applied to the pooled graph embedding.
        self.mlp_out = nn.Sequential(
            nn.Linear(64, 64),
            nn.ReLU(),
            nn.Linear(64, 64),
            nn.ReLU(),
            nn.Linear(64, 64),
            nn.ReLU(),
            nn.Linear(64, 1)
        )

    def forward(self, obs, act):
        """Return a scalar value estimate per batch element."""
        per_object = self.preprocess(obs, act)
        vertices = self.mlp_in(per_object)
        embeddings = self.graph_propagation(vertices)
        pooled = self.read_out(vertices=embeddings).squeeze(1)
        return self.mlp_out(pooled)

    def preprocess(self, x, act):
        """Reshape flat observations into per-object rows, tiling the action.

        x: (batch, robot_obs + num_obj*obj_obs + num_obj*goal [+ ignored tail])
        act: (batch, action_size)
        returns: (batch, num_obj, action_size + robot_obs + obj_obs + goal)
        """
        if self.ignore_goal_size > 0:
            # Drop the unused tail of the observation vector.
            x = x[..., :-self.ignore_goal_size]
        n_batch, obs_size = x.shape
        assert (obs_size - self.robot_obs_size) % (self.obj_obs_size + self.goal_size) == 0
        num_obj = (obs_size - self.robot_obs_size) // (self.obj_obs_size + self.goal_size)
        # Robot state and action are shared: tile each once per object.
        robot = x[:, :self.robot_obs_size].repeat(1, num_obj).reshape(n_batch, num_obj, self.robot_obs_size)
        tiled_act = act.repeat(1, num_obj).reshape(n_batch, num_obj, -1)
        objs = x[:, self.robot_obs_size:self.robot_obs_size + self.obj_obs_size * num_obj] \
            .reshape(n_batch, num_obj, self.obj_obs_size)
        goals = x[:, self.robot_obs_size + self.obj_obs_size * num_obj:] \
            .reshape(n_batch, num_obj, self.goal_size)
        return torch.cat((tiled_act, robot, objs, goals), dim=-1)
if __name__ == '__main__':
    import gym
    import fetch_block_construction
    # Smoke test: roll out the untrained actor/critic in a 1-block Fetch env.
    env = gym.make('FetchBlockConstruction_1Blocks_IncrementalReward_DictstateObs_42Rendersize_FalseStackonly_SingletowerCase-v1')
    obs = env.reset()
    # NOTE(review): actor_ReNN/critic_ReNN read 'goal_size', 'obj_obs_size',
    # 'robot_obs_size' and 'ignore_goal_size' from env_params, but this dict
    # only provides 'obs', 'goal', 'action' and 'action_max' -- constructing
    # them below should raise KeyError. Confirm the intended key names/sizes.
    env_param = {'obs': obs['observation'].shape[0],
                 'goal': obs['desired_goal'].shape[0],
                 'action': env.action_space.shape[0],
                 'action_max': env.action_space.high[0],
                 }
    actor = actor_ReNN(env_param)
    critic = critic_ReNN(env_param)
    # obs = torch.load('/Users/reedpan/Downloads/obs')
    for i in range(100):
        # Flatten observation + goal into a (1, -1) tensor for the actor.
        obs = torch.Tensor(np.append(obs['observation'], obs['desired_goal']).reshape(1, -1))
        action = (actor(obs)).detach().numpy().flatten()
        obs, rew, done, _ = env.step(action)
        print(
            critic(
                torch.Tensor(np.append(obs['observation'], obs['desired_goal'])).reshape(1, -1),
                torch.Tensor(action))
        )
        env.render(mode = 'human')
|
""" Created by Max 10/4/2017 """
from __future__ import division
import random
import math
from typing import Dict, Tuple, List
class CrossValidation:
    """k-fold cross-validation driver for the k-NN (regression) and
    decision-tree (classification) learners used in this project.

    Classification uses stratified folds so each fold mirrors the overall
    class distribution of the data set.
    """

    def __init__(self, folds, learner):
        """
        Constructor
        :param folds: num folds
        :param learner: the k-NN algorithm to use.
        """
        self.folds = folds
        self.learner = learner

    def cross_validation_regression(self, dataset):
        """
        Runs cross validation, using the k-NN regression
        Creates the folds for CV.
        For each fold, creates the test and training sets.
        Calculate the MSE for the data sets.
        Store the results.
        average the MSE over the number of folds and calc SD
        return values.
        :param dataset: the training data set to use.
        :return: average MSE, Standard deviation, the predictions, and the actuals for all cv runs
        """
        random.shuffle(dataset)  # NOTE: shuffles the caller's list in place
        fold_length = int(math.floor(len(dataset)/self.folds))
        cross_validation_dataset = []
        # Slice the shuffled data into consecutive folds. A short remainder
        # chunk may be appended but is never used (only self.folds are read).
        for i in range(0, len(dataset), fold_length):
            cross_validation_dataset.append(dataset[i:i+fold_length])
        # run cross validation
        mse_list = []
        predictions = []
        actuals = []
        for i in range(self.folds):
            # construct training set.
            test_set = cross_validation_dataset[i]
            # Training set = every fold except the held-out fold i, flattened.
            training_set = cross_validation_dataset[:i] + cross_validation_dataset[i+1:]
            training_set = [item for sublist in training_set for item in sublist]
            # Get the MSE
            mse = self.calculate_mse(self.learner, training_set, test_set)
            # Store results
            mse_list.append(mse[0])
            predictions.append(mse[1])
            actuals.append(mse[2])
        average_mse = sum(mse_list) / len(mse_list)
        sd = self.calc_standard_deviation(average_mse, mse_list)
        return average_mse, sd, predictions, actuals

    def cross_validation_classification(self, dataset, pruning=False):
        """
        Runs cross validation, using the DT classification
        Pulls out a stratified sample for the validation set. 10%
        Creates the folds for CV. USES Stratified data for each fold
        For each fold, creates the test and training sets.
        Calculate the error rate for the sets.
        Store the results.
        average the error rate over the number of folds and calc SD
        return values.
        :param dataset: the training data set to use.
        :param pruning: boolean, if we are using pruning or not.
        :return: average Error rate , Standard deviation, the predictions, and the actuals for all cv runs

        NOTE(review): `pruning` is not referenced in this method body --
        confirm whether the learner is expected to handle it elsewhere.
        """
        random.shuffle(dataset)
        fold_length = int(math.floor(len(dataset)/self.folds))
        cross_validation_dataset = self.get_stratified_data(dataset, fold_length, self.folds)
        # run cross validation
        error_list = []
        predictions = []
        actuals = []
        models = []
        for i in range(self.folds):
            # construct training set.
            test_set = cross_validation_dataset[i]
            training_set = cross_validation_dataset[:i] + cross_validation_dataset[i+1:]
            training_set = [item for sublist in training_set for item in sublist]
            # calculate the error rate for the test set with the training set
            # Re-initialise the learner so state does not leak across folds.
            self.learner.init()
            model = self.learner.learn(training_set)
            models.append(model)
            error_rate = self.calculate_error_rate(self.learner, model, test_set)
            # Store results
            error_list.append(error_rate[0])
            predictions.append(error_rate[1])
            actuals.append(error_rate[2])
        average_error_rate = sum(error_list) / len(error_list)
        sd = self.calc_standard_deviation(average_error_rate, error_list)
        return average_error_rate, sd, models, predictions, actuals

    def calc_standard_deviation(self, average, list_of_values):
        """
        Calculates the SD of the Cross validation.
        (Population standard deviation: divides by N, not N-1.)
        :param average: average error for CV float
        :param list_of_values: list of errors for CV
        :return: sd of CV
        """
        sd = 0
        for x in list_of_values:
            sd += (x - average) ** 2
        sd /= len(list_of_values)
        sd = math.sqrt(sd)
        return sd

    def calculate_mse(self, learner, training_data, test_data):
        """
        Helper function for calculating MSE, and tracking actual and predicted values.
        Calculate the squared error for each pair of points, and sum over all the squared errors.
        Then divide by the number of squared errors.
        :param learner: the k-NN regression class
        :param training_data: the data set for the model
        :param test_data: the query points to get predictions for.
        :return: (MSE, list of predictions, list of corresponding actuals)
        """
        squared_error = []
        predictions = learner.test(training_data, test_data)
        actual = []
        # The label is assumed to be the last element of each test item.
        for prediction, test_item in zip(predictions, test_data):
            actual.append(test_item[-1])
            squared_error.append(self.get_squared_error(prediction, test_item))
        squared_error_sum = sum(squared_error)
        mse = squared_error_sum / len(squared_error)
        return mse, predictions, actual

    def get_squared_error(self, predicted_value, item):
        """
        Calculates the squared error of predicted points, and actual items
        :param predicted_value: list of floats
        :param item: list of query data points (label in the last position).
        :return: Squared error
        """
        actual_value = item[-1]
        squared_error = (predicted_value - actual_value)**2
        return squared_error

    def calculate_error_rate(self, learner, model, test_data):
        """
        Calculates the error rate for classification.
        tracks the actual and predictions
        :param learner: a classifier
        :param model: model of the learner
        :param test_data: query points for
        :return: error_rate, list of predictions, the actual values.
        """
        predictions = learner.classify(model, test_data)
        actuals = []
        num_errors = 0
        for prediction, test_item in zip(predictions, test_data):
            # if type(prediction) is tuple:
            #     actual_prediction = prediction[0]
            # else:
            #     actual_prediction = prediction[0][0]
            actual_prediction = prediction
            actuals.append(test_item[-1])
            # A misclassification is any prediction != true label (last column).
            if actual_prediction != test_item[-1]:
                num_errors += 1
        error_rate = num_errors / len(predictions)
        return (error_rate, predictions, actuals)

    def get_stratified_data(self, dataset, fold_length, num_folds):
        """
        Creates the Cross Validation fold with Stratified data, i.e. data that matches the distribution of the overall
        data set.
        segment the data
        calculate distribution of classes
        create x folds according to that distribution, without replacement.
        :param dataset: list of list of data points with labels
        :param fold_length: number of points in each fold
        :param num_folds: number of folds to build.
        :return: a list of list of datapoints, where each inner list is a fold in the CV
        """
        # for all data
        unique_labels = {}
        labeled_datapoints = {}
        # build dict of listed segmented datapoints
        for datapoint in dataset:
            label = datapoint[-1]
            if label in unique_labels:
                unique_labels[label] += 1
                labeled_datapoints[label].append(datapoint)
            else:
                unique_labels[label] = 1
                labeled_datapoints[label] = [datapoint]
        # Calculate the class distribution
        distribution = {}
        for key in unique_labels:
            distribution[key] = unique_labels[key] / len(dataset)
        fold_data_set = []
        # Folds share labeled_datapoints, so sampling is without replacement
        # across folds as well as within a fold.
        for x in range(num_folds):
            single_fold = self.build_single_fold(distribution, labeled_datapoints, fold_length)
            fold_data_set.append(single_fold)
        return fold_data_set

    def build_single_fold(self, distribution, labeled_datapoints, fold_length):
        """
        Builds a single CV fold according to a distribution without replacement.
        :param distribution: dict of dist of the classes
        :param labeled_datapoints: dict of labeled data points (mutated: chosen
            points are removed so they cannot be selected again)
        :param fold_length: number of data poitns in a fold
        :return: list of data points in the fold.
        """
        # build a single fold
        fold = []
        for key in distribution:
            # get number of data points for this class
            number_of_items_per_class = int(distribution[key] * fold_length)
            # Guarantee at least one sample per class per fold.
            if number_of_items_per_class == 0:
                number_of_items_per_class = 1
            # select the data points for this class in this fold.
            single_key_datapoints = []
            for x in range(number_of_items_per_class):
                datapoint_possibilities = labeled_datapoints[key]
                # Class exhausted: nothing left to draw for this fold.
                if len(datapoint_possibilities) == 0:
                    continue
                selected_datapoint_index = random.randint(0, len(datapoint_possibilities) - 1)
                single_key_datapoints.append(datapoint_possibilities[selected_datapoint_index])
                # remove datapoint from being selected again.
                del datapoint_possibilities[selected_datapoint_index]
            fold = fold + single_key_datapoints
        return fold

    def get_validation_set(self, dataset: List[List], percentage_of_data_for_validation: int) -> Tuple[List[list], List[list]]:
        """
        Creates a validation set of the data and deletes the data used for validation from the original data set
        :param dataset: list of lsit
        :param percentage_of_data_for_validation: int percentage to use
            (NOTE: used as a divisor, so 10 means one tenth of the data)
        :return: tuple( validation set, modified data set
        """
        fold_length = int(math.floor(len(dataset) / percentage_of_data_for_validation))
        stratified_data = self.get_stratified_data(dataset, fold_length, 1)
        stratified_data = stratified_data[0]
        # Remove the sampled points from the original data set in place.
        for stratified_data_point in stratified_data:
            if stratified_data_point in dataset:
                index = dataset.index(stratified_data_point)
                del dataset[index]
        return stratified_data, dataset
|
from Jarvis import JarvisAssistant
import re
import os
import random
import pprint
import datetime
import requests
import sys
import urllib.parse
import pyjokes
import time
import pyautogui
import pywhatkit
import wolframalpha
from PIL import Image
from PyQt5 import QtWidgets, QtCore, QtGui
from PyQt5.QtCore import QTimer, QTime, QDate, Qt
from PyQt5.QtGui import QMovie
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.uic import loadUiType
from Jarvis.features.gui import Ui_MainWindow
from Jarvis.config import config
# Shared assistant instance used by speak() and every command handler below.
obj = JarvisAssistant()
# ================================ MEMORY ===========================================================================================================
# Wake/greeting phrases; a command must equal one of these exactly to match
# (note the trailing space in "hello ").
GREETINGS = ["hello ", "jarvis", "wake up jarvis", "you there jarvis", "time to work jarvis", "hey jarvis",
             "ok jarvis", "are you there"]
# Responses picked at random when a greeting is recognised.
GREETINGS_RES = ["always there for you sir", "i am ready sir",
                 "your wish my command", "how can i help you sir?", "i am online and ready sir"]
# Maps spoken recipient names to email addresses for the "send email" feature.
EMAIL_DIC = {
    'myself': 'atharvaaingle@gmail.com',
    'my official email': 'atharvaaingle@gmail.com',
    'my second email': 'atharvaaingle@gmail.com',
    'my official mail': 'atharvaaingle@gmail.com',
    'my second mail': 'atharvaaingle@gmail.com'
}
# Phrases that indicate a Google-Calendar query.
CALENDAR_STRS = ["what do i have", "do i have plans", "am i busy"]
# =======================================================================================================================================================
def speak(text):
    """Speak `text` aloud via the assistant's text-to-speech engine."""
    obj.tts(text)
# WolframAlpha application id, read once from the project configuration.
app_id = config.wolframalpha_id


def computational_intelligence(question):
    """Answer `question` via the WolframAlpha API.

    Returns the first plain-text result, or None (after apologising aloud)
    when the lookup or parsing fails.
    """
    try:
        client = wolframalpha.Client(app_id)
        answer = client.query(question)
        answer = next(answer.results).text
        print(answer)
        return answer
    except Exception:
        # BUGFIX: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed; any API/network/parse failure
        # still falls through to the spoken apology.
        speak("Sorry sir I couldn't fetch your question's answer. Please try again ")
        return None
def startup():
    """Announce that Jarvis is online, then greet the user.

    The greeting/time/ready sequence is identical to wish(); it is delegated
    there so the two code paths cannot drift apart.
    """
    # speak("Initializing Jarvis")
    # speak("Starting all systems applications")
    # speak("Installing and checking all drivers")
    # speak("Caliberating and examining all the core processors")
    # speak("Checking the internet connection")
    # speak("Wait a moment sir")
    # speak("All drivers are up and running")
    # speak("All systems have been activated")
    speak("Now I am online")
    # CONSISTENCY: the remainder of the original body duplicated wish()
    # line-for-line; call it instead.
    wish()
def wish():
    """Greet the user according to the clock, say the time, and report ready."""
    hour = datetime.datetime.now().hour
    # Pick the greeting for the current part of the day.
    if hour <= 12:
        greeting = "Good Morning"
    elif hour < 18:
        greeting = "Good afternoon"
    else:
        greeting = "Good evening"
    speak(greeting)
    c_time = obj.tell_time()
    speak(f"Currently it is {c_time}")
    speak("I am Jarvis. Online and ready sir. Please tell me how may I help you")
# if __name__ == "__main__":
class MainThread(QThread):
    """Background thread running the voice-command loop, keeping the GUI live."""

    def __init__(self):
        super(MainThread, self).__init__()

    def run(self):
        # QThread entry point: delegate to the command-dispatch loop.
        self.TaskExecution()

    def TaskExecution(self):
        """Listen for voice commands forever and dispatch each to one handler.

        BUGFIXES relative to the original:
        - removed a stray `b` statement (NameError) after the news branch;
        - the calendar condition was `... or "do i have plans" or ...`, a
          non-empty literal that made the branch always true, so every branch
          below it was unreachable;
        - several handlers used `if` instead of `elif`, breaking the single
          dispatch chain;
        - removed a duplicated calculate/"what is" block at the end;
        - fixed the missing space in the "Launching:" message and the missing
          path separator when reopening a screenshot;
        - narrowed bare `except:` clauses to `except Exception:`.
        """
        # startup()
        # wish()
        while True:
            command = obj.mic_input()
            if re.search('date', command):
                date = obj.tell_me_date()
                print(date)
                speak(date)
            elif "time" in command:
                time_c = obj.tell_time()
                print(time_c)
                speak(f"Sir the time is {time_c}")
            elif re.search('launch', command):
                # Launchable applications and their executable paths.
                dict_app = {
                    'chrome': 'C:/Program Files/Google/Chrome/Application/chrome'
                }
                app = command.split(' ', 1)[1]
                path = dict_app.get(app)
                if path is None:
                    speak('Application path not found')
                    print('Application path not found')
                else:
                    # BUGFIX: added the missing space before "for you sir!".
                    speak('Launching: ' + app + ' for you sir!')
                    obj.launch_any_app(path_of_app=path)
            elif command in GREETINGS:
                speak(random.choice(GREETINGS_RES))
            elif re.search('open', command):
                domain = command.split(' ')[-1]
                open_result = obj.website_opener(domain)
                speak(f'Alright sir !! Opening {domain}')
                print(open_result)
            elif re.search('weather', command):
                city = command.split(' ')[-1]
                weather_res = obj.weather(city=city)
                print(weather_res)
                speak(weather_res)
            elif re.search('tell me about', command):
                topic = command.split(' ')[-1]
                if topic:
                    wiki_res = obj.tell_me(topic)
                    print(wiki_res)
                    speak(wiki_res)
                else:
                    speak(
                        "Sorry sir. I couldn't load your query from my database. Please try again")
            elif "buzzing" in command or "news" in command or "headlines" in command:
                news_res = obj.news()
                speak('Source: The Times Of India')
                speak('Todays Headlines are..')
                for index, articles in enumerate(news_res):
                    pprint.pprint(articles['title'])
                    speak(articles['title'])
                    # Stop before the final headline (original behaviour).
                    if index == len(news_res)-2:
                        break
                speak('These were the top headlines, Have a nice day Sir!!..')
                # BUGFIX: a stray `b` statement here raised NameError.
            elif 'search google for' in command:
                obj.search_anything_google(command)
            elif "play music" in command or "hit some music" in command:
                music_dir = "F://Songs//Imagine_Dragons"
                songs = os.listdir(music_dir)
                for song in songs:
                    os.startfile(os.path.join(music_dir, song))
            elif 'youtube' in command:
                video = command.split(' ')[1]
                speak(f"Okay sir, playing {video} on youtube")
                pywhatkit.playonyt(video)
            elif "email" in command or "send email" in command:
                sender_email = config.email
                sender_password = config.email_password
                try:
                    speak("Whom do you want to email sir ?")
                    recipient = obj.mic_input()
                    receiver_email = EMAIL_DIC.get(recipient)
                    if receiver_email:
                        speak("What is the subject sir ?")
                        subject = obj.mic_input()
                        speak("What should I say?")
                        message = obj.mic_input()
                        msg = 'Subject: {}\n\n{}'.format(subject, message)
                        obj.send_mail(sender_email, sender_password,
                                      receiver_email, msg)
                        speak("Email has been successfully sent")
                        time.sleep(2)
                    else:
                        speak(
                            "I coudn't find the requested person's email in my database. Please try again with a different name")
                except Exception:
                    # BUGFIX: narrowed from a bare `except:`.
                    speak("Sorry sir. Couldn't send your mail. Please try again")
            elif "calculate" in command:
                answer = computational_intelligence(command)
                speak(answer)
            elif "what is" in command or "who is" in command:
                answer = computational_intelligence(command)
                speak(answer)
            elif "what do i have" in command or "do i have plans" in command or "am i busy" in command:
                # BUGFIX: the middle clause was the bare literal
                # `"do i have plans"`, which is always truthy.
                obj.google_calendar_events(command)
            elif "make a note" in command or "write this down" in command or "remember this" in command:
                # BUGFIX: was `if`, which broke the elif dispatch chain.
                speak("What would you like me to write down?")
                note_text = obj.mic_input()
                obj.take_note(note_text)
                speak("I've made a note of that")
            elif "close the note" in command or "close notepad" in command:
                speak("Okay sir, closing notepad")
                os.system("taskkill /f /im notepad++.exe")
            elif "joke" in command:
                # BUGFIX: was `if`, which broke the elif dispatch chain.
                joke = pyjokes.get_joke()
                print(joke)
                speak(joke)
            elif "system" in command:
                sys_info = obj.system_info()
                print(sys_info)
                speak(sys_info)
            elif "where is" in command:
                place = command.split('where is ', 1)[1]
                current_loc, target_loc, distance = obj.location(place)
                city = target_loc.get('city', '')
                state = target_loc.get('state', '')
                country = target_loc.get('country', '')
                time.sleep(1)
                try:
                    if city:
                        res = f"{place} is in {state} state and country {country}. It is {distance} km away from your current location"
                        print(res)
                        speak(res)
                    else:
                        res = f"{state} is a state in {country}. It is {distance} km away from your current location"
                        print(res)
                        speak(res)
                except Exception:
                    # BUGFIX: narrowed from a bare `except:`.
                    res = "Sorry sir, I couldn't get the co-ordinates of the location you requested. Please try again"
                    speak(res)
            elif "ip address" in command:
                ip = requests.get('https://api.ipify.org').text
                print(ip)
                speak(f"Your ip address is {ip}")
            elif "switch the window" in command or "switch window" in command:
                speak("Okay sir, Switching the window")
                pyautogui.keyDown("alt")
                pyautogui.press("tab")
                time.sleep(1)
                pyautogui.keyUp("alt")
            elif "where i am" in command or "current location" in command or "where am i" in command:
                try:
                    city, state, country = obj.my_location()
                    print(city, state, country)
                    speak(
                        f"You are currently in {city} city which is in {state} state and country {country}")
                except Exception as e:
                    speak(
                        "Sorry sir, I coundn't fetch your current location. Please try again")
            elif "take screenshot" in command or "take a screenshot" in command or "capture the screen" in command:
                speak("By what name do you want to save the screenshot?")
                name = obj.mic_input()
                speak("Alright sir, taking the screenshot")
                img = pyautogui.screenshot()
                name = f"{name}.png"
                img.save(name)
                speak("The screenshot has been succesfully captured")
            elif "show me the screenshot" in command:
                try:
                    # BUGFIX: the path was concatenated without a separator and
                    # `name` is undefined until a screenshot has been taken
                    # (NameError is now handled like a missing file).
                    img = Image.open(os.path.join(
                        'C://Users//prathmesh//Desktop//PROJECTS//Voice-Assistant', name))
                    img.show()
                    speak("Here it is sir")
                    time.sleep(2)
                except (IOError, NameError):
                    speak("Sorry sir, I am unable to display the screenshot")
            elif "hide all files" in command or "hide this folder" in command:
                os.system("attrib +h /s /d")
                speak("Sir, all the files in this folder are now hidden")
            elif "visible" in command or "make files visible" in command:
                os.system("attrib -h /s /d")
                speak("Sir, all the files in this folder are now visible to everyone. I hope you are taking this decision in your own peace")
            elif "goodbye" in command or "offline" in command or "bye" in command:
                speak("Alright sir, going offline. It was nice working with you")
                sys.exit()
startExecution = MainThread()
class Main(QMainWindow):
    """Main window wiring the generated Qt UI to the assistant worker thread."""

    def __init__(self):
        super().__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # Start button launches the assistant; the second button closes the app.
        self.ui.pushButton.clicked.connect(self.startTask)
        self.ui.pushButton_2.clicked.connect(self.close)

    def __del__(self):
        # Restore stdout in case it was redirected while the window was alive.
        sys.stdout = sys.__stdout__

    # def run(self):
    #     self.TaskExection
    def startTask(self):
        """Start the background animations, the clock timer and the worker."""
        self.ui.movie = QtGui.QMovie("Jarvis/utils/images/live_wallpaper.gif")
        self.ui.label.setMovie(self.ui.movie)
        self.ui.movie.start()
        self.ui.movie = QtGui.QMovie("Jarvis/utils/images/initiating.gif")
        self.ui.label_2.setMovie(self.ui.movie)
        self.ui.movie.start()
        # Refresh the on-screen clock once a second.
        clock = QTimer(self)
        clock.timeout.connect(self.showTime)
        clock.start(1000)
        startExecution.start()

    def showTime(self):
        """Timer slot: refresh the date and time text browsers."""
        now = QTime.currentTime()
        today = QDate.currentDate()
        self.ui.textBrowser.setText(today.toString(Qt.ISODate))
        self.ui.textBrowser_2.setText(now.toString('hh:mm:ss'))
# Boot the Qt application and show the main window.
app = QApplication(sys.argv)
jarvis = Main()
jarvis.show()
# Block until the GUI exits, then propagate its return code.
exit(app.exec_())
|
import celery
from analysis.models import VariantTag, Analysis
from analysis.models.nodes.node_utils import update_analysis
from library.guardian_utils import admin_bot
from snpdb.clingen_allele import populate_clingen_alleles_for_variants
from snpdb.liftover import create_liftover_pipelines
from snpdb.models import ImportSource, VariantAlleleSource, VariantAllele
@celery.shared_task
def analysis_tag_created_task(variant_tag_id):
    """Async follow-up for a newly created tag (keeps add/remove requests fast)."""
    try:
        tag = VariantTag.objects.get(pk=variant_tag_id)
    except VariantTag.DoesNotExist:
        # The tag was removed before this task ran - nothing left to do.
        return
    update_analysis(tag.analysis.pk)
    _liftover_variant_tag(tag)
@celery.shared_task
def analysis_tag_deleted_task(analysis_id, _tag_id):
    """Async refresh of an analysis after one of its tags was deleted."""
    # The get() doubles as an existence check; DoesNotExist propagates.
    record = Analysis.objects.get(pk=analysis_id)
    update_analysis(record.pk)
def _liftover_variant_tag(variant_tag: VariantTag):
    """Ensure ClinGen alleles exist for the tagged variant, then start liftover."""
    build = variant_tag.analysis.genome_build
    populate_clingen_alleles_for_variants(build, [variant_tag.variant])
    va = VariantAllele.objects.get(variant=variant_tag.variant, genome_build=build)
    # Wrap the allele in a source record that the liftover pipelines consume.
    source = VariantAlleleSource.objects.create(variant_allele=va)
    create_liftover_pipelines(admin_bot(), source, ImportSource.WEB, build)
|
# import modules urllib, thread, lock and queue
import urllib.request, urllib.error
from threading import Thread, Lock
from queue import Queue
# Work queue of site URL prefixes to probe.
q = Queue()
# Guards `discovered_usernames`, which worker threads append to concurrently.
list_lock = Lock()
discovered_usernames = []
# scan_sites from username
def scan_sites(username):
global q
while True:
sites = q.get()
url = f"{sites}{username}"
try:
conn = urllib.request.urlopen(url)
# 404 error code, page does not exist
except urllib.error.HTTPError as e:
# create text file and save profile pages that dosent exist
text_file = open(f"{username}""_not_found.txt", "a")
text_file.write(url)
text_file.write("\n")
text_file.close()
print("[-] No user found:", f"{sites}")
# connection reffused to site
except urllib.error.URLError as e:
# create text file error_logs.txt and enter sites that reffused to connect
text_file = open("error_logs/error_logs.txt", "a")
text_file.write(url)
text_file.write("\n")
text_file.close()
# show sites that reffused to connect
print('[!] Connection reffused to', f"{sites}")
pass
else:
# export found profile page urls to a text file
text_file = open(f"{username}"".txt", "a")
text_file.write(url)
text_file.write("\n")
text_file.close()
print("[+] User found in", url)
with list_lock:
discovered_usernames.append(url)
q.task_done()
def main(username, n_threads, sites):
    """Fill the work queue with site URL prefixes and start `n_threads`
    daemon worker threads scanning for `username`.

    FIX: the original used `for sites in sites`, shadowing the parameter;
    renamed the loop variable so the iterable is not clobbered mid-loop.
    """
    global q
    for site in sites:
        q.put(site)
    for _ in range(n_threads):
        worker = Thread(target=scan_sites, args=(username,), daemon=True)
        worker.start()
if __name__ == "__main__":
    import argparse
    # Banner: raw string so the backslashes are not treated as escape
    # sequences (the original triggered invalid-escape DeprecationWarnings).
    print(r"""
    ___ _ __ _ _
    / __| '_ \| | | |
    \__ | |_) | |_| |
    |___| .__/ \__, |
    |_| |___/
    # Social account scanner by @cyb3r-g0d
    """)
    parser = argparse.ArgumentParser(description="An OSINT tool to scan social media accounts by username across social networks")
    parser.add_argument("username", help="username to scan for sites")
    parser.add_argument("-l", "--sitelist", help="File that contains all sites to scan, line by line. Default is sites.txt",
                        default="res/sites.txt")
    parser.add_argument("-t", "--num-threads", help="Number of threads to use to scan the username. Default is 10", default=10, type=int)
    args = parser.parse_args()
    username = args.username
    sitelist = args.sitelist
    num_threads = args.num_threads
    # FIX: read the site list with a context manager so the handle is closed
    # (the original left open(sitelist) dangling).
    with open(sitelist) as site_file:
        site_urls = site_file.read().splitlines()
    main(username=username, n_threads=num_threads, sites=site_urls)
    # block until every queued site has been processed
    q.join()
|
import sys
def parse(filename="/usr/include/linux/input.h"):
    """Parse the ``KEY_*`` #defines out of a Linux ``input.h`` header.

    Returns a dict mapping key code (int) -> key name with the ``KEY_``
    prefix stripped and lower-cased, e.g. ``{30: 'a'}``.  Values that are
    neither decimal nor ``0x``-hex (macro references, expressions) are
    reported on stderr and skipped.

    FIX: the original used Python 2 ``print >> sys.stderr`` syntax, which is
    a SyntaxError on Python 3; the file is now also closed via ``with``.
    """
    ord_desc_map = {}
    with open(filename) as header:
        for line in header:
            parts = line.strip().split()
            if len(parts) < 3:
                continue
            if parts[0] != "#define" or not parts[1].startswith("KEY_"):
                continue
            key_val = parts[2]
            if key_val.startswith("0x"):
                base = 16
            elif key_val.isdigit():
                base = 10
            else:
                # value is another macro or an expression -- skip it
                print("invalid", parts, file=sys.stderr)
                continue
            # strip the 'KEY_' prefix (4 chars) from the macro name
            ord_desc_map[int(key_val, base)] = parts[1][4:].lower()
    return ord_desc_map
if __name__ == "__main__":
    # FIX: Python 3 print is a function (original used Python 2 statement syntax)
    print(parse())
|
# Conversion factors relative to 'cd' (= 1.0).
# NOTE(review): 'cp' and 'hk' look like candlepower and Hefnerkerze
# (luminous-intensity units) -- confirm against the consuming code.
UNITS = {
    'cd': 1.0,
    'cp': 0.981,
    'hk': 0.920,
}
|
import unittest
from algorithms.say_number import say
class SayTest(unittest.TestCase):
    """Tests for algorithms.say_number.say (English names of integers).

    FIX: removed `test_number_to_large`, which was a byte-identical duplicate
    of `test_number_too_large` (same body, misspelled name).
    """

    def setUp(self):
        # Python 2 compatibility shim: fall back to the old
        # assertRaisesRegexp name when assertRaisesRegex is missing.
        try:
            self.assertRaisesRegex
        except AttributeError:
            self.assertRaisesRegex = self.assertRaisesRegexp

    def assertRaisesWithMessage(self, exception):
        # require that the raised exception carries a non-empty message
        return self.assertRaisesRegex(exception, r".+")

    def test_one(self):
        self.assertEqual("one", say(1))

    def test_fourteen(self):
        self.assertEqual("fourteen", say(14))

    def test_twenty(self):
        self.assertEqual("twenty", say(20))

    def test_twenty_two(self):
        self.assertEqual("twenty-two", say(22))

    def test_one_hundred(self):
        self.assertEqual("one hundred", say(100))

    def test_one_hundred_twenty(self):
        self.assertEqual("one hundred and twenty", say(120))

    def test_one_hundred_twenty_three(self):
        self.assertEqual("one hundred and twenty-three", say(123))

    def test_one_thousand(self):
        self.assertEqual("one thousand", say(1000))

    def test_one_thousand_two_hundred_thirty_four(self):
        self.assertEqual("one thousand two hundred and thirty-four", say(1234))

    def test_one_million(self):
        self.assertEqual("one million", say(1e6))

    def test_one_million_two(self):
        self.assertEqual("one million and two", say(1000002))

    def test_1002345(self):
        self.assertEqual(
            "one million two thousand three hundred and forty-five", say(1002345)
        )

    def test_one_billion(self):
        self.assertEqual("one billion", say(1e9))

    def test_number_negative(self):
        with self.assertRaisesWithMessage(ValueError):
            say(-42)

    def test_zero(self):
        self.assertEqual("zero", say(0))

    def test_115(self):
        self.assertEqual("one hundred and fifteen", say(115))

    def test_987654321123(self):
        self.assertEqual(
            "nine hundred and eighty-seven billion "
            + "six hundred and fifty-four million "
            + "three hundred and twenty-one thousand "
            + "one hundred and twenty-three",
            say(987654321123),
        )

    def test_number_too_large(self):
        with self.assertRaisesWithMessage(ValueError):
            say(1e12)
if __name__ == "__main__":
    # run the test suite when executed directly
    unittest.main()
|
import argparse
import os
import time
import uuid
from backbone.base import Base as BackboneBase
from config.eval_config import EvalConfig as Config
from dataset.base import Base as DatasetBase
from evaluator import Evaluator
from logger import Logger as Log
from model import Model
from roi.pooler import Pooler
def _eval(path_to_checkpoint: str, dataset_name: str, backbone_name: str, path_to_data_dir: str, path_to_results_dir: str):
    """Evaluate a detection checkpoint on the named dataset and log mean AP.

    Builds the dataset in EVAL mode, restores the model from
    `path_to_checkpoint`, runs the Evaluator on a single GPU and logs the
    resulting mean AP plus per-class detail.
    """
    dataset = DatasetBase.from_name(dataset_name)(path_to_data_dir, DatasetBase.Mode.EVAL, Config.IMAGE_MIN_SIDE, Config.IMAGE_MAX_SIDE)
    evaluator = Evaluator(dataset, path_to_data_dir, path_to_results_dir)
    Log.i('Found {:d} samples'.format(len(dataset)))
    # pretrained=False: backbone weights come from the checkpoint below anyway
    backbone = BackboneBase.from_name(backbone_name)(pretrained=False)
    model = Model(backbone, dataset.num_classes(),
                  pooler_mode=Config.POOLER_MODE,
                  anchor_ratios=Config.ANCHOR_RATIOS,
                  anchor_sizes=Config.ANCHOR_SIZES,
                  rpn_pre_nms_top_n=Config.RPN_PRE_NMS_TOP_N,
                  rpn_post_nms_top_n=Config.RPN_POST_NMS_TOP_N).cuda()
    model.load(path_to_checkpoint)
    Log.i('Start evaluating with 1 GPU (1 batch per GPU)')
    mean_ap, detail = evaluator.evaluate(model)
    Log.i('Done')
    Log.i('mean AP = {:.4f}'.format(mean_ap))
    Log.i('\n' + detail)
if __name__ == '__main__':
    def main():
        """Parse CLI arguments, set up config/logging and run evaluation."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-s', '--dataset', type=str, choices=DatasetBase.OPTIONS, required=True, help='name of dataset')
        parser.add_argument('-b', '--backbone', type=str, choices=BackboneBase.OPTIONS, required=True, help='name of backbone model')
        parser.add_argument('-d', '--data_dir', type=str, default='./data', help='path to data directory')
        parser.add_argument('--image_min_side', type=float, help='default: {:g}'.format(Config.IMAGE_MIN_SIDE))
        parser.add_argument('--image_max_side', type=float, help='default: {:g}'.format(Config.IMAGE_MAX_SIDE))
        parser.add_argument('--anchor_ratios', type=str, help='default: "{!s}"'.format(Config.ANCHOR_RATIOS))
        parser.add_argument('--anchor_sizes', type=str, help='default: "{!s}"'.format(Config.ANCHOR_SIZES))
        parser.add_argument('--pooler_mode', type=str, choices=Pooler.OPTIONS, help='default: {.value:s}'.format(Config.POOLER_MODE))
        parser.add_argument('--rpn_pre_nms_top_n', type=int, help='default: {:d}'.format(Config.RPN_PRE_NMS_TOP_N))
        parser.add_argument('--rpn_post_nms_top_n', type=int, help='default: {:d}'.format(Config.RPN_POST_NMS_TOP_N))
        parser.add_argument('checkpoint', type=str, help='path to evaluating checkpoint')
        args = parser.parse_args()
        path_to_checkpoint = args.checkpoint
        dataset_name = args.dataset
        backbone_name = args.backbone
        path_to_data_dir = args.data_dir
        # Results directory name: timestamp + checkpoint filename truncated at
        # the first '.' (os.path.curdir is '.') + a short random uuid suffix.
        path_to_results_dir = os.path.join(os.path.dirname(path_to_checkpoint), 'results-{:s}-{:s}-{:s}'.format(
            time.strftime('%Y%m%d%H%M%S'), path_to_checkpoint.split(os.path.sep)[-1].split(os.path.curdir)[0],
            str(uuid.uuid4()).split('-')[0]))
        # intentionally fails if the directory already exists (uuid makes
        # collisions unlikely)
        os.makedirs(path_to_results_dir)
        Config.setup(image_min_side=args.image_min_side,
                     image_max_side=args.image_max_side,
                     anchor_ratios=args.anchor_ratios,
                     anchor_sizes=args.anchor_sizes,
                     pooler_mode=args.pooler_mode,
                     rpn_pre_nms_top_n=args.rpn_pre_nms_top_n,
                     rpn_post_nms_top_n=args.rpn_post_nms_top_n)
        Log.initialize(os.path.join(path_to_results_dir, 'eval.log'))
        Log.i('Arguments:')
        for k, v in vars(args).items():
            Log.i(f'\t{k} = {v}')
        Log.i(Config.describe())
        _eval(path_to_checkpoint, dataset_name, backbone_name, path_to_data_dir, path_to_results_dir)
    main()
|
import datetime
from cache_memoize import cache_memoize
from biblepaycentral.biblepay.clients import BiblePayRpcClient
@cache_memoize(timeout=3600)
def estimate_blockhittime(height):
    """Estimate when block `height` will be mined (naive UTC datetime).

    If the chain has already reached `height`, return that block's median
    time.  Otherwise take the current block and the block 200 blocks in the
    past, compute the average time per block over that window, and project
    forward from now to `height`.

    FIX: the projection base used datetime.now() (local time) while every
    block timestamp is decoded with utcfromtimestamp (UTC); now both sides
    are UTC.  Docstring also corrected: the window is 200 blocks, not 500.
    """
    client = BiblePayRpcClient('main')
    current_height = client.getblockcount()
    dt = None
    if current_height >= height:  # already at or after the block? use its real time
        block_hash = client.getblockhash(height)
        block = client.getblock(block_hash)
        dt = datetime.datetime.utcfromtimestamp(block['mediantime'])
    else:
        past_block_sub = 200
        block_hash = client.getblockhash(current_height)
        block = client.getblock(block_hash)
        dt_current = datetime.datetime.utcfromtimestamp(block['mediantime'])
        past_block = current_height - past_block_sub
        block_hash = client.getblockhash(past_block)
        block = client.getblock(block_hash)
        dt_past = datetime.datetime.utcfromtimestamp(block['mediantime'])
        # average time between two blocks over the window
        diff = (dt_current - dt_past) / past_block_sub
        # how many blocks the required block is in the future
        diff_count = height - current_height
        # project forward: now (UTC) + avg-time-per-block * remaining blocks
        dt = datetime.datetime.utcnow() + (diff * diff_count)
    return dt
|
# Generated by Django 2.1.5 on 2019-02-06 17:06
from django.db import migrations
class Migration(migrations.Migration):
    # Non-atomic migration: presumably because the model rename cannot run
    # inside a transaction on the target backend -- TODO confirm intent.
    atomic=False
    dependencies = [
        ('bookshelf', '0015_auto_20190206_1703'),
    ]
    operations = [
        # rename model A_Log -> Log (table rename handled by Django)
        migrations.RenameModel(
            old_name='A_Log',
            new_name='Log',
        ),
    ]
|
"""
In PyAnsys I type
```
test_result = pyansys.open_result('sample.rst')
estress,elem,enode = test_result.element_stress(0)
print(estress[23])
print(enode[23])
```
And get
```
[[nan nan nan nan nan nan]
[nan nan nan nan nan nan]
[nan nan nan nan nan nan]
[nan nan nan nan nan nan]]
[ 1 82 92 8]
```
And in Ansys I get
```
ELEMENT= 24 SHELL281
NODE SX SY SZ SXY SYZ SXZ
1 -50.863 -0.63898E-030 -215.25 -0.18465E-015 0.10251E-013 -47.847
82 13.635 -0.74815E-030 -178.71 -0.21958E-015 0.11999E-013 17.232
92 -7.1801 -0.84355E-030 -213.77 -0.56253E-015 0.13214E-013 2.0152
8 -47.523 -0.96156E-030 -204.12 -0.30574E-014 0.12646E-013 2.4081
1 107.75 0.13549E-029 454.30 0.45391E-015-0.21674E-013 100.34
82 -28.816 0.15515E-029 372.47 0.38337E-015-0.24955E-013 -35.077
92 14.454 0.16547E-029 429.43 0.80716E-015-0.26217E-013 1.2719
8 94.254 0.19148E-029 409.31 0.59899E-014-0.25281E-013 -3.5690
```
It would also be really useful to be able to read the Nodal Forces and Moment from the Elemental Solution using the:
element_solution_data(0,'ENF',sort=True)
From PyAnsys for element 24:
```
array([ 7.1140683e-01, 2.5775826e-06, 1.8592998e+00, 1.7531972e-03,
-5.4216904e-12, -6.6381943e-04, 7.8414015e-02, 4.7199319e-06,
-1.2074181e+00, -9.0049638e-04, -5.2645028e-12, -3.2152122e-05,
7.3660083e-02, 2.5742002e-05, -1.1951995e+00, -2.7250897e-04,
1.0039868e-12, 1.5112829e-04, -1.9362889e-01, 4.7199323e-06,
1.3849777e+00, 6.4305059e-05, 2.9884493e-12, -2.2116321e-04,
3.0604819e-02, -4.8676171e-05, -1.0389121e-01, 5.7917450e-16,
-2.7263033e-25, 4.0045388e-17, -8.5023224e-02, 2.9796447e-05,
-5.3827515e+00, -2.2202423e-03, 3.8493188e-11, 7.6806801e-04,
-8.5418850e-01, 2.4989351e-06, -3.3126956e-01, -9.2828198e-04,
6.3002242e-11, 8.8052053e-05, 2.3875487e-01, -2.1378659e-05,
4.9762526e+00, 2.5969518e-03, 5.0141464e-11, -1.8303801e-04],
dtype=float32)])
```
And From Ansys:
```
ELEM= 24 FX FY FZ
1 0.71141 0.25776E-005 1.8593
82 0.78414E-001 0.47199E-005 -1.2074
92 0.73660E-001 0.25742E-004 -1.1952
8 -0.19363 0.47199E-005 1.3850
83 0.30605E-001-0.48676E-004-0.10389
86 -0.85023E-001 0.29796E-004 -5.3828
93 -0.85419 0.24989E-005-0.33127
9 0.23875 -0.21379E-004 4.9763
ELEM= 24 MX MY MZ
1 0.17532E-002-0.54217E-011-0.66382E-003
82 -0.90050E-003-0.52645E-011-0.32152E-004
92 -0.27251E-003 0.10040E-011 0.15113E-003
8 0.64305E-004 0.29884E-011-0.22116E-003
83 0.57917E-015-0.27263E-024 0.40045E-016
86 -0.22202E-002 0.38493E-010 0.76807E-003
93 -0.92828E-003 0.63002E-010 0.88052E-004
9 0.25970E-002 0.50141E-010-0.18304E-003
"""
import os
import numpy as np
import pyansys
# from pyansys.examples import hexarchivefile
# from pyansys.examples import rstfile
# from pyansys.examples import fullfile
# Resolve the test-files directory next to this script; fall back to a
# hard-coded path when __file__ is undefined (e.g. pasted into a REPL).
try:
    __file__
    test_path = os.path.dirname(os.path.abspath(__file__))
    testfiles_path = os.path.join(test_path, 'testfiles')
except NameError:
    # FIX: catch only NameError (what a missing __file__ raises) instead of
    # a bare except, which would also swallow KeyboardInterrupt/SystemExit.
    testfiles_path = '/home/alex/afrl/python/source/pyansys/tests/testfiles'
# Reference values copied from ANSYS output (6 stress components per row) to
# compare against pyansys results -- presumably SX, SY, SZ, SXY, SYZ, SXZ per
# node; confirm against the ANSYS listing quoted in the module docstring.
ANSYS_ELEM = [[0.17662E-07, 79.410, -11.979, -0.11843E-02, 4.8423, -0.72216E-04],
              [0.20287E-07, 91.212, 27.364, -0.13603E-02, 4.8423, -0.72216E-04],
              [0.20287E-07, 91.212, 27.364, -0.13603E-02, -4.8423, 0.72216E-04],
              [0.17662E-07, 79.410, -11.979, -0.11843E-02, -4.8423, 0.72216E-04]]
# Same values in nodal ordering.
ANSYS_NODE = [[0.20287E-07, 91.212, 27.364, -0.13603E-02, 4.8423, -0.72216E-04],
              [0.17662E-07, 79.410, -11.979, -0.11843E-02, 4.8423, -0.72216E-04],
              [0.17662E-07, 79.410, -11.979, -0.11843E-02, -4.8423, 0.72216E-04],
              [0.20287E-07, 91.212, 27.364, -0.13603E-02, -4.8423, 0.72216E-04]]
# Debug scratch for reading SHELL281 element stresses from a result file.
result_file = os.path.join(testfiles_path, 'shell281.rst')
test_result = pyansys.open_result(result_file, valid_element_types=['281'])
# estress, elem, enode = test_result.element_stress(0, in_element_coord_sys=False)
estress, elem, enode = test_result.element_stress(0, in_element_coord_sys=True)
print(estress[23][:4])
# debug
np.any(np.isclose(-50.863, estress[23]))
np.isclose(-50.863, estress[23]).any(1).nonzero()
# np.isclose(-50.863, table).any(1).nonzero()
# NOTE(review): `f` and `read_table` are not defined anywhere in this file --
# the next two lines raise NameError.  This looks like leftover interactive
# debugging (a file handle and helper from a pyansys session).
f.seek(400284 - 8)
table = read_table(f, 'f')
# f.seek(400284)
ncomp = 6    # stress components per node
nodstr = 4   # nodes with stress output per element (hedged: matches tables above)
nl = 7
# nread = nl*3*nodstr*ncomp
# table = read_table(f, 'f', get_nread=False, nread=nread)
print((np.isclose(-50.863, table).nonzero()[0] - 1)/table.size)
# print(read_table(f, 'i', get_nread=False, nread=1))
# print(table[:10])
# # elem, res = test_result.element_solution_data(0, 'ENF', sort=True)
# # print(res[23].reshape(8, -1))
# # fseek(cfile, (ele_table + PTR_ENS_IDX)*4, SEEK_SET)
# # fread(&ptrENS, sizeof(int32_t), 1, cfile)
# # fseek(cfile, (ele_table + ptrENS)*4, SEEK_SET)
# # fread(&ele_data_arr[c, 0], sizeof(float), nread, cfile)
# # number of items in this record is NL*3*nodstr*ncomp
# ncomp = 6
# nodstr = 4
|
import uuid
from django.conf import settings
from django.db import models
from django.db.models.functions import Now
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from config.utils import upload_to_classname_uuid
class Safe(models.Model):
    '''
    Model representing user's safe that contain all secrets.
    '''
    # UUID primary key, generated client-side
    id = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False,
    )
    # owning user; deleting the user deletes their safes
    owner = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        verbose_name=_('owner'),
        related_name='safes',
    )
    # optional cover image; upload path derived from class name + uuid
    image = models.ImageField(
        _('image'),
        upload_to=upload_to_classname_uuid,
        blank=True,
    )
    # opaque encrypted payload -- encryption happens outside this model
    data = models.TextField(
        _('encrypted data'),
        blank=True,
    )
    date_created = models.DateTimeField(
        _('created at'),
        auto_now_add=True,
    )
    last_accessed = models.DateTimeField(
        _('last accessed'),
        default=timezone.now,
        blank=True,
    )
    class Meta:
        verbose_name = _('Safe')
        verbose_name_plural = _('Safes')
    def __str__(self):
        return f'{self.id} (UserID: {self.owner_id})'
    def update_last_access_time(self):
        # Single UPDATE using the database clock (Now()) -- avoids a race on
        # the in-memory instance and does not fire model save() signals.
        self.__class__.objects.filter(id=self.id).update(last_accessed=Now())
class SafeItem(models.Model):
    '''
    Model representing single item in safe.
    '''
    # UUID primary key, generated client-side
    id = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False,
    )
    # parent safe; deleting the safe deletes its items
    safe = models.ForeignKey(
        Safe,
        on_delete=models.CASCADE,
        verbose_name=_('safe'),
        related_name='items',
    )
    # opaque encrypted payload -- encryption happens outside this model
    data = models.TextField(
        _('encrypted data'),
        blank=True,
    )
    class Meta:
        verbose_name = _('Safe item')
        verbose_name_plural = _('Safe items')
    def __str__(self):
        return f'{self.id} (SafeID: {self.safe_id})'
|
"""Entry point of this module."""
import json
import logging
import logging.config
import os
import time
from pathlib import Path
from py_parser_sber.sberbank_parse import SberbankClientParser
from py_parser_sber.utils import (
Retry,
get_transaction_interval,
)
logger = logging.getLogger(__name__)
def _setup_logging(logging_path='logging.json'):
    """Configure the logging subsystem from a JSON file next to this module."""
    config_file = Path(__file__).resolve().parent / logging_path
    with config_file.open() as fh:
        logging.config.dictConfig(json.load(fh))
def _runner():
    """Run one full parse-and-send cycle against Sberbank.

    Required environment variables: LOGIN, PASSWORD, SERVER_URL,
    SEND_ACCOUNT_URL, SEND_PAYMENT_URL (KeyError if any is missing).
    Optional: SERVER_PORT (default 80), SERVER_SCHEME (default 'http').
    """
    logger.info('Start parsing...')
    need_env_vars = ['LOGIN', 'PASSWORD', 'SERVER_URL', 'SEND_ACCOUNT_URL', 'SEND_PAYMENT_URL']
    need_data_for_start = {k.lower(): os.environ[k] for k in need_env_vars}
    need_data_for_start['transactions_interval'] = get_transaction_interval()
    # NOTE(review): os.getenv returns str when the variable is set, so
    # server_port is str-or-int -- confirm SberbankClientParser handles both.
    need_data_for_start['server_port'] = os.getenv('SERVER_PORT', 80)
    need_data_for_start['server_scheme'] = os.getenv('SERVER_SCHEME', 'http')
    sber = SberbankClientParser(**need_data_for_start)
    try:
        sber.auth()
        sber.accounts_page_parser()
        sber.transactions_pages_parser()
        sber.send_account_data()
        sber.send_payment_data()
        logger.info('Success iteration')
    finally:
        # always release the parser's resources, even when a step fails
        sber.close()
def py_parser_sber_run_once():
    """Entry point for run parsing once."""
    _setup_logging()
    # run the parser once, retrying up to 2 attempts on any exception
    Retry(function=_runner, error=Exception, max_attempts=2)()
def py_parser_sber_run_infinite():
    """Entry point: run parsing forever, sleeping get_transaction_interval()
    between iterations (3 retry attempts per iteration).

    FIX: os.getenv returns strings, so HOURS="0" in the environment was
    truthy and wrongly forced days to 0; both values are now converted to
    int before the truthiness check and the log message.
    """
    _setup_logging()
    retry = Retry(function=_runner, error=Exception, max_attempts=3)
    while True:
        try:
            retry()
        finally:
            hours = int(os.getenv("HOURS", 0))
            days = int(os.getenv("DAYS", 0 if hours else 1))
            logger.info(f'Waiting for a new transactions after {days} days and {hours} hours')
            time.sleep(get_transaction_interval())
if __name__ == '__main__':
    # single run when executed as a script
    py_parser_sber_run_once()
|
from graphsage.utils import load_data
from graphsage.supervised_train import train
import sys
def main(targets):
    """Load the NBA GraphSAGE dataset and train on it.

    NOTE(review): `targets` is currently unused -- the data path is
    hard-coded; confirm whether targets were meant to select a pipeline step.
    """
    train_data = load_data('data/sage_NBA/nba')
    train(train_data)
if __name__ == '__main__':
    # forward command-line arguments (currently ignored by main)
    targets = sys.argv[1:]
    main(targets)
|
import os.path as op
# Cam-CAN dataset locations on the lab storage server.
camcan_path = '/storage/store/data/camcan'
camcan_meg_path = op.join(
    camcan_path, 'camcan47/cc700/meg/pipeline/release004/')
camcan_meg_raw_path = op.join(camcan_meg_path, 'data/aamod_meg_get_fif_00001')
mne_camcan_freesurfer_path = (
    '/storage/store/data/camcan-mne/freesurfer')
derivative_path = '/storage/store/derivatives/camcan/pipelines/base2018/MEG'
import os
from os import uname
# Pick Biomag2018 data locations per machine/user.
# NOTE(review): os.environ['USER'] raises KeyError when USER is unset
# (e.g. some cron/CI environments) -- confirm that is acceptable here.
host = uname()[1]
user = os.environ['USER']
if host == 'toothless':
    subjects_dir = '/home/sik/Dropbox/Biomag2018_epilepsy_challenge/freesurfer'
    mne_data_path = '/home/sik/Dropbox/Biomag2018_epilepsy_challenge/original_data'
    # subjects_dir = '/home/sik/retreat/Biomag2018/freesurfer'
    # mne_data_path = '/home/sik/retreat/Biomag2018/original_data'
elif user == 'alex':
    subjects_dir = '/Users/alex/Dropbox/Biomag2018_epilepsy_challenge/freesurfer'
    mne_data_path = '/Users/alex/Dropbox/Biomag2018_epilepsy_challenge/original_data'
    # subjects_dir = '/Users/alex/work/data/retreat_project1/Biomag2018/freesurfer'
    # mne_data_path = '/Users/alex/work/data/retreat_project1/Biomag2018/original_data'
elif user == 'hichamjanati':
    subjects_dir = '/Users/hichamjanati/Dropbox/Biomag2018_epilepsy_challenge/freesurfer'
    mne_data_path = '/Users/hichamjanati/Dropbox/Biomag2018_epilepsy_challenge/original_data'
else:
    # default: shared lab storage
    subjects_dir = '/storage/store/data/biomag_challenge/Biomag2018/freesurfer'
    mne_data_path = '/storage/store/data/biomag_challenge/Biomag2018/original_data'
subject_ids = ('226', '245', '251')
|
import pyrr
import glfw
import numpy as np
from math import sin, cos
from OpenGL.GL import *
from OpenGL.GL.shaders import compileProgram, compileShader
from lib.util.obj import ObjLoader
from lib.util.texture import load_texture
##############################################################################
# shaders
##############################################################################
# Vertex shader: transforms positions into view space and passes through
# UVs and (normal-matrix-transformed) normals.
# NOTE(review): the `projection` uniform is declared but NOT applied to
# gl_Position (only view * model) -- confirm this is intentional.
vertex_src = """
# version 330
layout(location = 0) in vec3 a_position;
layout(location = 1) in vec2 a_texture;
layout(location = 2) in vec3 a_normal;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
uniform mat4 normal_matrix;
out vec2 v_texture;
out vec3 v_normal;
void main()
{
gl_Position = view * model * vec4(a_position, 1.0);
v_texture = a_texture;
v_normal = normalize(mat3(normal_matrix) * a_normal); // using normal matrix
}
"""
# Fragment shader: draws a white silhouette where the normal is nearly
# perpendicular to the camera direction (|dot| < 0.2), textured otherwise.
fragment_src = """
# version 330
in vec2 v_texture;
in vec3 v_normal;
uniform vec3 camera_pos;
uniform vec3 camera_target;
uniform sampler2D s_texture;
out vec4 out_color;
void main()
{
vec3 normal = normalize(v_normal);
vec3 camera_dir = normalize(camera_pos - camera_target);
float sil = dot(normal, camera_dir);
if (sil < 0.2 && sil > -0.2)
out_color = vec4(1.0, 1.0, 1.0, 1.0);
else
out_color = texture(s_texture, v_texture);
}
"""
##############################################################################
##############################################################################
# glfw callback functions
def window_resize(window, width, height):
    """GLFW window-size callback: update the viewport and re-upload the
    projection matrix (relies on the module-level `proj_loc` uniform
    location, which is set after shader setup below).

    FIXES: guard against height == 0 (minimized window would divide by
    zero), and use far plane 1000 to match the projection built at startup
    (the original used 100 here and 1000 there).
    """
    if height == 0:
        return
    glViewport(0, 0, width, height)
    projection = pyrr.matrix44.create_perspective_projection_matrix(45, width / height, 0.1, 1000)
    glUniformMatrix4fv(proj_loc, 1, GL_FALSE, projection)
##############################################################################
# glfw
##############################################################################
# Initialize GLFW and create the window + GL context.
if not glfw.init():
    raise Exception("glfw can not be initialized!")
width, height = 1920, 1080
window = glfw.create_window(width, height, "Mesh Visualization", None, None)
if not window:
    # clean up the partially-initialized library before bailing out
    glfw.terminate()
    raise Exception("glfw window can not be created!")
glfw.set_window_pos(window, 400, 200)
glfw.set_window_size_callback(window, window_resize)
glfw.make_context_current(window)
##############################################################################
##############################################################################
##############################################################################
# load model
##############################################################################
# mesh obj paths
# Load the three OBJ meshes (face + two eyes) and their textures.
face_obj_path = './assets/therock/Face.obj'
lefteye_obj_path = './assets/therock/LeftEye.obj'
righteye_obj_path = './assets/therock/RightEye.obj'
# mesh texture paths
face_tex_path = './assets/therock/textures/Texture_Face.jpg'
lefteye_tex_path = './assets/therock/textures/Texture_LeftEye.jpg'
righteye_tex_path = './assets/therock/textures/Texture_RightEye.jpg'
face_meta = ObjLoader.load_model(face_obj_path)
lefteye_meta = ObjLoader.load_model(lefteye_obj_path)
righteye_meta = ObjLoader.load_model(righteye_obj_path)
#================= FACE =================#
# NOTE(review): face_vertices/face_tex/face_norms are never used below --
# only 'indices' (draw count) and 'buffer' (interleaved data) are consumed.
face_vertices = face_meta['v']
face_tex = face_meta['vt']
face_norms = face_meta['vn']
face_indices = face_meta['indices']
face_buffer = face_meta['buffer']
#================= EYES =================#
lefteye_indices = lefteye_meta['indices']
lefteye_buffer = lefteye_meta['buffer']
righteye_indices = righteye_meta['indices']
righteye_buffer = righteye_meta['buffer']
##############################################################################
##############################################################################
# compile the shader programs
# Compile and link the vertex + fragment shaders into one program.
shader = compileProgram(
    compileShader(vertex_src, GL_VERTEX_SHADER),
    compileShader(fragment_src, GL_FRAGMENT_SHADER)
)
##############################################################################
# VAO/VBO
##############################################################################
# One VAO/VBO pair per mesh.  Each buffer is interleaved as 8 floats per
# vertex: position (byte offset 0), uv (offset 12), normal (offset 20).
VAO = glGenVertexArrays(3)
VBO = glGenBuffers(3)
#================= FACE =================#
glBindVertexArray(VAO[0])
glBindBuffer(GL_ARRAY_BUFFER, VBO[0])
glBufferData(GL_ARRAY_BUFFER, face_buffer.nbytes, face_buffer, GL_STATIC_DRAW)
# face vertices (x, y, z)
glEnableVertexAttribArray(0)
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, face_buffer.itemsize * 8, ctypes.c_void_p(0))
# face textures (u, v)
glEnableVertexAttribArray(1)
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, face_buffer.itemsize * 8, ctypes.c_void_p(12))
# face normals (x, y, z)
glEnableVertexAttribArray(2)
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, face_buffer.itemsize * 8, ctypes.c_void_p(20))
glBindVertexArray(0)
#================= LEFT EYE =================#
glBindVertexArray(VAO[1])
glBindBuffer(GL_ARRAY_BUFFER, VBO[1])
glBufferData(GL_ARRAY_BUFFER, lefteye_buffer.nbytes, lefteye_buffer, GL_STATIC_DRAW)
# left eye vertices (x, y, z)
glEnableVertexAttribArray(0)
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, lefteye_buffer.itemsize * 8, ctypes.c_void_p(0))
# left eye textures (u, v)
glEnableVertexAttribArray(1)
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, lefteye_buffer.itemsize * 8, ctypes.c_void_p(12))
# left eye normals (x, y, z)
glEnableVertexAttribArray(2)
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, lefteye_buffer.itemsize * 8, ctypes.c_void_p(20))
glBindVertexArray(0)
#================= RIGHT EYE =================#
glBindVertexArray(VAO[2])
glBindBuffer(GL_ARRAY_BUFFER, VBO[2])
glBufferData(GL_ARRAY_BUFFER, righteye_buffer.nbytes, righteye_buffer, GL_STATIC_DRAW)
# right eye vertices (x, y, z)
glEnableVertexAttribArray(0)
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, righteye_buffer.itemsize * 8, ctypes.c_void_p(0))
# right eye textures (u, v)
glEnableVertexAttribArray(1)
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, righteye_buffer.itemsize * 8, ctypes.c_void_p(12))
# right eye normals (x, y, z)
glEnableVertexAttribArray(2)
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, righteye_buffer.itemsize * 8, ctypes.c_void_p(20))
glBindVertexArray(0)
##############################################################################
##############################################################################
##############################################################################
# textures
##############################################################################
# One texture object per mesh, loaded from the JPG files above.
textures = glGenTextures(3)
load_texture(face_tex_path, textures[0])
load_texture(lefteye_tex_path, textures[1])
load_texture(righteye_tex_path, textures[2])
##############################################################################
##############################################################################
##############################################################################
# setup/transformations
##############################################################################
# GL state + one-time uniform uploads (model/projection/normal matrix are
# static; the view matrix is uploaded every frame in the loop below).
glUseProgram(shader)
glClearColor(0, 0.1, 0.1, 1)
glEnable(GL_DEPTH_TEST)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
# shrink the model (0.005x) and keep it at the origin
scale = pyrr.Matrix44.from_scale((0.005, 0.005, 0.005))
translation = pyrr.matrix44.create_from_translation(pyrr.Vector3([0.0, 0.0, 0.0]))
model = pyrr.matrix44.multiply(translation, scale)
projection = pyrr.matrix44.create_perspective_projection_matrix(
    fovy=45,
    aspect=width/height,
    near=0.1,
    far=1000
)
# inverse-transpose of the model matrix, for transforming normals
normal_matrix = np.linalg.inv(model).T
model_loc = glGetUniformLocation(shader, "model")
view_loc = glGetUniformLocation(shader, "view")
proj_loc = glGetUniformLocation(shader, "projection")
normal_matrix_loc = glGetUniformLocation(shader, "normal_matrix")
camera_pos_loc = glGetUniformLocation(shader, "camera_pos")
camera_target_loc = glGetUniformLocation(shader, "camera_target")
glUniformMatrix4fv(model_loc, 1, GL_FALSE, model)
glUniformMatrix4fv(proj_loc, 1, GL_FALSE, projection)
glUniformMatrix4fv(normal_matrix_loc, 1, GL_FALSE, normal_matrix)
##############################################################################
##############################################################################
##############################################################################
# main application loop
##############################################################################
# Main render loop: orbit the camera around the origin and draw the three
# meshes (face, left eye, right eye), each with its own VAO and texture.
while not glfw.window_should_close(window):
    # clear the buffers
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    # orbit the camera on a small circle in the XZ plane (0.5 rad/s)
    radius = 0.1
    camX = sin(0.5 * glfw.get_time()) * radius
    camZ = cos(0.5 * glfw.get_time()) * radius
    camera_position = pyrr.Vector3([camX, 0.0, camZ])
    camera_target = pyrr.Vector3([0.0, 0.0, 0.0])
    camera_up = pyrr.Vector3([0.0, 1.0, 0.0])
    view = pyrr.matrix44.create_look_at(
        eye=camera_position,
        target=camera_target,
        up=camera_up
    )
    #================= FACE =================#
    glBindVertexArray(VAO[0])
    glUniformMatrix4fv(view_loc, 1, GL_FALSE, view)
    glUniform3fv(camera_pos_loc, 1, camera_position)
    glUniform3fv(camera_target_loc, 1, camera_target)
    glBindTexture(GL_TEXTURE_2D, textures[0])
    # non-indexed draw: assumes the buffer holds one vertex per index entry
    # (ObjLoader appears to expand the index list) -- TODO confirm
    glDrawArrays(GL_TRIANGLES, 0, len(face_indices))
    glBindVertexArray(0)
    #================= EYES =================#
    glBindVertexArray(VAO[1])
    glUniformMatrix4fv(view_loc, 1, GL_FALSE, view)
    glUniform3fv(camera_pos_loc, 1, camera_position)
    glUniform3fv(camera_target_loc, 1, camera_target)
    glBindTexture(GL_TEXTURE_2D, textures[1])
    glDrawArrays(GL_TRIANGLES, 0, len(lefteye_indices))
    glBindVertexArray(0)
    glBindVertexArray(VAO[2])
    glUniformMatrix4fv(view_loc, 1, GL_FALSE, view)
    glUniform3fv(camera_pos_loc, 1, camera_position)
    glUniform3fv(camera_target_loc, 1, camera_target)
    glBindTexture(GL_TEXTURE_2D, textures[2])
    glDrawArrays(GL_TRIANGLES, 0, len(righteye_indices))
    glBindVertexArray(0)
    # swap front and back buffers | poll for and process events
    glfw.swap_buffers(window)
    glfw.poll_events()
glfw.terminate()
##############################################################################
##############################################################################
|
#
#
# Copyright [2015] [Benjamin Marks and Riley Collins]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
'''
parameters.py - Defines the parameters for a MRAnneal Instance.
Ben Marks, Riley Collins, Kevin Webb
Swarthmore College
MRAnneal
'''
# How many machines should we use?
# Higher Values => Generally, faster runtime, especially
# for computationally intensive annealing
# or seed solution generation.
# Lower Values => Less costly, more cluster resources available
# for other tasks.
#
# Recommended Value: 50
numMachines = 50
# How many results should be returned at the end?
#
# Recommended Value: 5
numFinalResults = 5
# Target percentage parameter: integer in range [0, 100]
# Higher Values => Longer runtime, higher scoring results
# Lower Values => Shorter runtime, lower scoring results
#
# Recommended Value: 65
# NOTE(review): set below the recommended 65 -- confirm this is intentional.
targetPercentage = 45
###############################
## Advanced Parameters ##
###############################
# What are the bounds on the number of rounds that should run?
minRounds = 10 # Rounds in Phase 2, must be at least 3 for proper
# curve estimation in Phase 3.
maxRounds = 100 # Upper bound on rounds
# What are the bounds on the number of seed solutions to generate?
minSeedSolutions = 1000
maxSeedSolutions = 10000
# How many times should we anneal each non-finalized solution per round
numAnnealCalls = 150
|
"""Installation module."""
from setuptools import setup, find_packages
def main():
    """Code to be executed on install."""
    # All package metadata lives here; packages are discovered under the
    # 'qsimov' namespace only.
    setup(
        name="qsimov-Mowstyl",
        version="4.0.0",
        author="Hernán Indíbil de la Cruz Calvo",
        author_email="HernanIndibil.LaCruz@alu.uclm.es",
        license="MIT",
        packages=find_packages(include=['qsimov', 'qsimov.*']),
        url="https://github.com/Mowstyl/QSimov",
        description="QSimov Quantum computer simulator",
        long_description="QSimov is a quantum computer simulator based on " +
                         "the circuit model.",
        classifiers=[
            'License :: OSI Approved :: MIT License',
            'Operating System :: OS Independent',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.6',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
            'Programming Language :: Python :: 3.9',
            'Topic :: Scientific/Engineering :: Mathematics',
            'Topic :: Software Development :: Libraries :: Python Modules',
        ],
        keywords="quantum",
        install_requires=[
            "numpy>=1.17.4",
            "doki-Mowstyl>=1.3.0"
        ]
    )
if __name__ == "__main__":
    # run the setuptools installation when executed directly
    main()
|
"""
Gamma matrices
"""
# Public API of this module.
__all__ = [
    "numpy",
    "outer",
    "gamma_matrices",
    "requires_numpy",
]
from .extensions import lazy_import, raiseif
try:
    # numpy is optional: import lazily and remember the failure so decorated
    # functions can raise it on first use instead of at import time
    numpy = lazy_import("numpy")
    _err = None
except ImportError as err:
    numpy = None
    _err = err
# decorator that re-raises the captured ImportError when numpy is missing
# (presumed semantics of raiseif -- confirm in .extensions)
requires_numpy = raiseif(numpy is None, _err)
@requires_numpy
def outer(left, right):
    "Outer product between two arrays"
    # numpy.kron is the Kronecker product of the two operands
    return numpy.kron(left, right)
@requires_numpy
def gamma_matrices(dim=4, euclidean=True):
    "Based on https://en.wikipedia.org/wiki/Higher-dimensional_gamma_matrices"
    assert dim > 0 and isinstance(dim, int)
    # the three Pauli matrices (sigma_x, sigma_y, sigma_z)
    sigmas = (
        numpy.array(((0, 1), (1, 0))),
        numpy.array(((0, -1j), (1j, 0))),
        numpy.array(((1, 0), (0, -1))),
    )
    chiral = sigmas[2]
    # base case (dim <= 2); the sign/phase factors switch between the
    # Euclidean and Minkowski conventions
    gammas = [sigmas[0], (-1 if euclidean else -1j) * sigmas[1]]
    # recursively double the representation: each step adds two dimensions
    # by tensoring with Pauli matrices
    for idx in range((dim + 1) // 2 - 1):
        new = []
        for gamma in gammas:
            new.append(outer(gamma, sigmas[-1]))
        new.append(
            outer(numpy.identity(2 ** (idx + 1)), (1 if euclidean else 1j) * sigmas[0])
        )
        new.append(
            outer(numpy.identity(2 ** (idx + 1)), (1 if euclidean else 1j) * sigmas[1])
        )
        gammas = new
        chiral = outer(chiral, sigmas[2])
    if dim % 2:
        # odd dimension: drop the surplus matrix's phase; no chiral matrix
        if euclidean:
            gammas[-1] *= 1j
        return tuple(gammas)
    # even dimension: append the chirality matrix as the last element
    gammas.append(chiral)
    return tuple(gammas)
|
from typing import List
def ways_up(n: int, step_sizes: List[int] = [1, 2]) -> int:
    """Return the number of ways to get up the stairs
    Calculate the number of paths up the stairs, where:
    - `n` is the size of the staircase, i.e. the number of stairs
    - `step_sizes` is a list containing the step sizes you are allowed to
    take
    Examples
    --------
    >>> ways_up(1)
    1
    >>> ways_up(5)
    8
    >>> ways_up(5, step_sizes=[1, 3, 5])
    5
    """
    # Bottom-up dynamic programming: ways[i] counts the distinct paths that
    # end exactly on stair i.  The list default is safe: it is never mutated.
    ways = [0] * (n + 1)
    ways[0] = 1  # one way to be at the bottom: take no steps
    for stair in range(1, n + 1):
        # A path reaches `stair` by taking a final step of size `step`
        # from stair - step, for every allowed step that fits.
        ways[stair] = sum(ways[stair - step]
                          for step in step_sizes if step <= stair)
    return ways[n]
|
import candle
import iisignature
import torch
import torch.autograd as autograd
import torch.nn as nn
import warnings
def sig_dim(alphabet_size, depth):
    """Calculates the number of terms in a signature of depth :depth: over an alphabet of size :alphabet_size:.

    Equals sum(alphabet_size ** i for i in range(1, depth + 1)).
    """
    # Computed as an exact integer sum rather than the closed-form
    # alphabet_size * (1 - alphabet_size ** depth) / (1 - alphabet_size),
    # which divides by zero for alphabet_size == 1 and loses precision in
    # float arithmetic for large alphabets/depths.
    return sum(alphabet_size ** i for i in range(1, depth + 1))
class path_sig_fn(autograd.Function):
    """An autograd.Function corresponding to the signature map. See also siglayer/backend/pytorch_implementation.py."""
    @staticmethod
    def forward(ctx, path, depth):
        # Remember the input device so the result can be moved back after the
        # CPU-only iisignature call.
        device = path.device
        # transpose because the PyTorch convention for convolutions is channels first. The iisignature expectation is
        # that channels are last.
        path = path.detach().cpu().numpy().transpose() # sloooow CPU :(
        # Stash the inputs on ctx; backward() needs them for sigbackprop.
        ctx.path = path
        ctx.depth = depth
        return torch.tensor(iisignature.sig(path, depth), dtype=torch.float, device=device)
    @staticmethod
    def backward(ctx, grad_output):
        device = grad_output.device
        backprop = iisignature.sigbackprop(grad_output.cpu().numpy(), ctx.path, ctx.depth)
        # transpose again to go back to the PyTorch convention of channels first
        out = torch.tensor(backprop, dtype=torch.float, device=device).t()
        # better safe than sorry
        # https://discuss.pytorch.org/t/when-should-you-save-for-backward-vs-storing-in-ctx/6522/9
        # not sure this is actually necessary though
        del ctx.path
        del ctx.depth
        # Second value is the gradient for `depth`, which is not differentiable.
        return out, None
def path_sig(path, depth):
    """Calculates the signature transform of a :path: to signature depth :depth:."""
    return path_sig_fn.apply(path, depth)
# Batched variant: applies path_sig to each element of a batch independently.
batch_path_sig = candle.batch_fn(path_sig)
class Signature(nn.Module):
    """Given some path mapping from, say, [0, 1] into \reals^d, we may define the 'signature' of the path as a
    particular sigtensor with respect to an alphabet of n letters. (Note how d is the target dimension of the path.)
    That is, the signature is a map from the space of paths to the tensor algebra. Up to certain mathematical niceties,
    this map may be inverted; the signature is sufficient to define the path. (Technically speaking, it defines the path
    up to 'tree-like equivalence': this means that the signature does not pick up on back-tracking)
    Thus the signature is a natural way to characterise a path; in the language of machine learning is an excellent
    feature map.
    Given a tensor of shape (x, y), then one may interpret this a piecewise constant path from [0, x] into \reals^y,
    changing its value at each integer. Whether this is a natural interpretation depends on the data that the tensor
    represents, of course, but this allows for taking the signature of a tensor, which is precisely what this Module
    does.
    """
    def __init__(self, depth, **kwargs):
        # candle.Integer: integer-like types accepted by the candle library.
        if not isinstance(depth, candle.Integer) or depth < 1:
            raise ValueError(f'Depth must be an integer greater than or equal to one. Given {depth} of type '
                             f'{type(depth)}')
        # nn.Module.__init__ must run before attributes are assigned on self.
        super(Signature, self).__init__(**kwargs)
        self.depth = depth
    def forward(self, path):
        if path.size(1) == 1:
            warnings.warn(f'{self.__class__.__name__} called on path with only one channel; the signature is now just '
                          f'the moments of the path, so there is no interesting information from cross terms.')
        # path is expected to be a 3-dimensional tensor, with batch, channel and length axes respectively, say of shape
        # (b, c, l). Each batch element is treated separately. Then values are interpreted as l sample points from a
        # path in \reals^c
        return batch_path_sig(path, depth=self.depth)
    def extra_repr(self):
        # Shown in repr(module) for debugging.
        return f'depth={self.depth}'
|
#!/usr/bin/env python3
# Copyright 2020 Efabless Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import argparse
import opendbpy as odb
# Command-line interface.  (Typo fix: "conneted" -> "connected" in help text.)
parser = argparse.ArgumentParser(
    description='Add cell power connections in the netlist. Useful for LVS purposes.')
parser.add_argument('--input-def', '-d', required=True,
                    help='DEF view of the design')
parser.add_argument('--input-lef', '-l', required=True,
                    help='LEF file needed to have a proper view of the design.\
                    Every cell having a pin labeled as a power pin (e.g., USE POWER) will\
                    be connected to the power/ground port of the design')
parser.add_argument('--power-port', '-v',
                    help='Name of the power port of the design. The power pin of the\
                    subcells will be connected to it')
parser.add_argument('--ground-port', '-g',
                    help='Name of the ground port of the design. The ground pin of the\
                    subcells will be connected to it')
parser.add_argument('--output', '-o',
                    default='output.def', help='Output modified netlist')
parser.add_argument('--ignore-missing-pins', '-q', action='store_true', required=False)
# parser.add_argument('--create-pg-ports',
#                     help='Create power and ground ports if not found')
args = parser.parse_args()
# Unpack parsed arguments into the module-level names used below.
def_file_name = args.input_def
lef_file_name = args.input_lef
power_port_name = args.power_port
ground_port_name = args.ground_port
ignore_missing_pins = args.ignore_missing_pins
output_file_name = args.output
# Load the design: LEF first (cell views), then the DEF netlist.
db = odb.dbDatabase.create()
odb.read_lef(db, lef_file_name)
odb.read_def(db, def_file_name)
chip = db.getChip()
block = chip.getBlock()
design_name = block.getName()
print("Top-level design name:", design_name)
# Locate the top-level power/ground ports either by signal USE
# (POWER/GROUND) or by the names supplied on the command line.
VDD = None
GND = None
ports = block.getBTerms()
for port in ports:
    if port.getSigType() == "POWER" or port.getName() == power_port_name:
        print("Found port", port.getName(), "of type", port.getSigType())
        VDD = port
    elif port.getSigType() == "GROUND" or port.getName() == ground_port_name:
        print("Found port", port.getName(), "of type", port.getSigType())
        GND = port
if None in [VDD, GND]: # and not --create-pg-ports
    # Typo fix in message ("argumens" -> "arguments").
    print("Error: No power ports found at the top-level. Make sure that they exist\
    and have the USE POWER|GROUND property or they match the arguments\
    specified with --power-port and --ground-port")
    # sys.exit for consistency with the error path further below.
    sys.exit(1)
VDD = VDD.getNet()
GND = GND.getNet()
print("Power net: ", VDD.getName())
print("Ground net:", GND.getName())
# Walk every instance and tie its power/ground pins to the top-level nets.
modified_cells = 0
cells = block.getInsts()
for cell in cells:
    iterms = cell.getITerms()
    if len(iterms) == 0:
        continue
    # Candidate pins for this instance: by USE first, by name as fallback.
    VDD_ITERMS = []
    GND_ITERMS = []
    VDD_ITERM_BY_NAME = None
    GND_ITERM_BY_NAME = None
    for iterm in iterms:
        if iterm.getSigType() == "POWER":
            VDD_ITERMS.append(iterm)
        elif iterm.getSigType() == "GROUND":
            GND_ITERMS.append(iterm)
        elif iterm.getMTerm().getName() == power_port_name:
            VDD_ITERM_BY_NAME = iterm
        elif iterm.getMTerm().getName() == ground_port_name: # note **PORT**
            GND_ITERM_BY_NAME = iterm
    if len(VDD_ITERMS) == 0:
        # No USE POWER pin in the LEF view; fall back to name matching.
        print("Warning: No pins in the LEF view of", cell.getName(), " marked for use as power")
        print("Warning: Attempting to match power pin by name (using top-level port name) for cell:", cell.getName())
        if VDD_ITERM_BY_NAME is not None: # note **PORT**
            print("Found", power_port_name, "using that as a power pin")
            VDD_ITERMS.append(VDD_ITERM_BY_NAME)
    if len(GND_ITERMS) == 0:
        print("Warning: No pins in the LEF view of", cell.getName(), " marked for use as ground")
        print("Warning: Attempting to match ground pin by name (using top-level port name) for cell:", cell.getName())
        if GND_ITERM_BY_NAME is not None: # note **PORT**
            print("Found", ground_port_name, "using that as a ground pin")
            GND_ITERMS.append(GND_ITERM_BY_NAME)
    if len(VDD_ITERMS) == 0 or len(GND_ITERMS) == 0:
        print("Warning: not all power pins found for cell:", cell.getName())
        if ignore_missing_pins:
            print("Warning: ignoring", cell.getName(), "!!!!!!!")
            continue
        else:
            print("Exiting... Use --ignore-missing-pins to ignore such errors")
            sys.exit(1)
    # Connect each candidate pin, skipping those already wired up.
    for VDD_ITERM in VDD_ITERMS:
        if VDD_ITERM.isConnected():
            pin_name = VDD_ITERM.getMTerm().getName()
            cell_name = cell.getName()
            print("Warning: power pin", pin_name, "of", cell_name, "is already connected")
            print("Warning: ignoring", cell_name + "/" + pin_name, "!!!!!!!")
        else:
            odb.dbITerm_connect(VDD_ITERM, VDD)
    for GND_ITERM in GND_ITERMS:
        if GND_ITERM.isConnected():
            pin_name = GND_ITERM.getMTerm().getName()
            cell_name = cell.getName()
            print("Warning: ground pin", pin_name, "of", cell_name, "is already connected")
            print("Warning: ignoring", cell_name + "/" + pin_name, "!!!!!!!")
        else:
            odb.dbITerm_connect(GND_ITERM, GND)
    modified_cells += 1
print("Modified power connections of", modified_cells, "cells (Remaining:",
      len(cells)-modified_cells,
      ").")
# Write the modified netlist back out as DEF.
odb.write_def(block, output_file_name)
|
from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework_inclusions.renderer import InclusionJSONRenderer
from .models import (
Basic,
BasicM2M,
C,
Child,
ChildProps,
Container,
E,
Entry,
MainObject,
ModelWithOptionalSub,
ModelWithProperty,
Parent,
Tag,
)
from .serializers import (
BasicM2MSerializer,
BasicSerializer,
ChildPropsSerializer2,
ChildSerializer,
ChildSerializer2,
ChildSerializer3,
CInclusionSerializer,
ContainerSerializer,
CSerializer,
EntryReadOnlyTagsSerializer,
ESerializer,
MainObjectSerializer,
ModelWithOptionalSubSerializer,
ModelWithPropertySerializer,
ParentSerializer,
TagSerializer,
)
class CommonMixin:
    """Shared test-viewset configuration: open access plus the inclusion renderer."""
    permission_classes = ()
    authentication_classes = ()
    renderer_classes = (InclusionJSONRenderer,)
    serializer_class = TagSerializer
class BasicViewSet(CommonMixin, viewsets.ModelViewSet):
    """CRUD viewset for Basic with an extra list action."""
    serializer_class = BasicSerializer
    queryset = Basic.objects.all()
    @action(detail=False)
    def many(self, request, *args, **kwargs):
        # Explicit ordering keeps the rendered output deterministic.
        serializer = BasicSerializer(
            instance=Basic.objects.all().order_by("id"), many=True
        )
        return Response(serializer.data)
class BasicM2MViewSet(CommonMixin, viewsets.ModelViewSet):
    """CRUD viewset for BasicM2M."""
    serializer_class = BasicM2MSerializer
    queryset = BasicM2M.objects.all()
class TagViewSet(CommonMixin, viewsets.ModelViewSet):
    """CRUD viewset for Tag; serializer_class comes from CommonMixin."""
    queryset = Tag.objects.all()
    pagination_class = None
    @action(detail=False)
    def custom_action(self, request):
        serializer = TagSerializer(self.get_queryset(), many=True)
        return Response(serializer.data)
class ParentViewSet(CommonMixin, viewsets.ModelViewSet):
    """CRUD viewset for Parent with two non-model actions."""
    serializer_class = ParentSerializer
    queryset = Parent.objects.all()
    @action(detail=False, methods=["post"])
    def check(self, request):
        # Non-model action: exercises inclusion rendering of arbitrary payloads.
        return Response({"arbitrary": "content"})
    @action(detail=True, methods=["post"])
    def check2(self, request, **kw):
        return Response({"arbitrary": "content"})
class ChildViewSet(CommonMixin, viewsets.ModelViewSet):
    """CRUD viewset for Child (serializer variant 1)."""
    serializer_class = ChildSerializer
    queryset = Child.objects.all()
class ChildViewSet2(CommonMixin, viewsets.ModelViewSet):
    """Same queryset as ChildViewSet, serializer variant 2."""
    serializer_class = ChildSerializer2
    queryset = Child.objects.all()
class ChildViewSet3(CommonMixin, viewsets.ModelViewSet):
    """Same queryset as ChildViewSet, serializer variant 3."""
    serializer_class = ChildSerializer3
    queryset = Child.objects.all()
class ChildPropsViewSet(CommonMixin, viewsets.ModelViewSet):
    """CRUD viewset for ChildProps."""
    serializer_class = ChildPropsSerializer2
    queryset = ChildProps.objects.all()
class ContainerViewSet(CommonMixin, viewsets.ModelViewSet):
    """CRUD viewset for Container."""
    serializer_class = ContainerSerializer
    queryset = Container.objects.all()
class EntryViewSet(CommonMixin, viewsets.ModelViewSet):
    """CRUD viewset for Entry using the read-only-tags serializer."""
    serializer_class = EntryReadOnlyTagsSerializer
    queryset = Entry.objects.all()
class CViewSet(CommonMixin, viewsets.ModelViewSet):
    """CRUD viewset for C with a detail action returning the serialized object."""
    serializer_class = CSerializer
    queryset = C.objects.all()
    @action(detail=True)
    def custom_action(self, request, *args, **kwargs):
        serializer = CSerializer(self.get_object())
        return Response(serializer.data)
class CDirectNestedInclusionViewSet(CommonMixin, viewsets.ModelViewSet):
    """C viewset using the inclusion serializer, without pagination."""
    serializer_class = CInclusionSerializer
    queryset = C.objects.all()
    pagination_class = None
class MainObjectViewSet(CommonMixin, viewsets.ModelViewSet):
    """CRUD viewset for MainObject, without pagination."""
    queryset = MainObject.objects.all()
    serializer_class = MainObjectSerializer
    pagination_class = None
class EViewSet(CommonMixin, viewsets.ModelViewSet):
    """CRUD viewset for E."""
    serializer_class = ESerializer
    queryset = E.objects.all()
class ModelWithPropertyViewSet(CommonMixin, viewsets.ModelViewSet):
    """CRUD viewset for ModelWithProperty."""
    serializer_class = ModelWithPropertySerializer
    queryset = ModelWithProperty.objects.all()
class ModelWithOptionalSubViewSet(CommonMixin, viewsets.ModelViewSet):
    """CRUD viewset for ModelWithOptionalSub."""
    serializer_class = ModelWithOptionalSubSerializer
    queryset = ModelWithOptionalSub.objects.all()
|
import os
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
from google.appengine.api import users
from google.appengine.api import images
from google.appengine.api.labs import taskqueue
from google.appengine.api.urlfetch import DownloadError
from google.appengine.api.datastore_errors import BadRequestError
from google.appengine.runtime import DeadlineExceededError
from os import environ
import json
import urllib
import logging
import time
import foursquarev2 as foursquare
import constants
import time
from datetime import datetime
from scripts import manage_foursquare_data
from gheatae import color_scheme, tile, provider
from models import UserInfo, UserVenue, MapImage
class IndexHandler(webapp.RequestHandler):
    """Renders the main page: header, welcome box, sidebar, map and footer."""
    def get(self):
        # Defaults for anonymous visitors; overwritten below when the user is
        # signed in and has stored UserInfo.
        welcome_data = {
            'user': '',
            'userinfo': '',
            'url': users.create_login_url(self.request.uri),
            'real_name': '',
            'photo_url': constants.default_photo,
            'is_ready': False
        }
        sidebar_data = {
            'color_scheme_dict': color_scheme.color_schemes,
            'color_scheme': constants.default_color,
        }
        map_data = {
            'citylat': constants.default_lat,
            'citylng': constants.default_lng,
            'zoom': constants.default_zoom,
            'width': constants.default_dimension,
            'height': constants.default_dimension,
            'domain': environ['HTTP_HOST'],
            # Hard-coded demo map and map-image key shown to anonymous users.
            'static_url': 'http://maps.google.com/maps/api/staticmap?center=40.738152838822934%2C-73.9822769165039&format=png&zoom=13&key=ABQIAAAAwA6oEsCLgzz6I150wm3ELBSujOi3smKLcjzph36ZE8UXngM_5BTs-xHblsuwK8V9g8bZ_PTfOWR1Fg&sensor=false&size=640x640',
            'mapimage_url': 'map/%s.png' % 'ag93aGVyZS1kby15b3UtZ29yEQsSCE1hcEltYWdlGNL0_wIM',
        }
        user = users.get_current_user()
        if user:
            welcome_data['user'] = user
            welcome_data['url'] = users.create_logout_url(self.request.uri)
            userinfo = UserInfo.all().filter('user =', user).get()
            if userinfo:
                # Personalise the page from the stored profile.
                welcome_data['userinfo'] = userinfo
                welcome_data['real_name'] = userinfo.real_name
                welcome_data['photo_url'] = userinfo.photo_url
                welcome_data['is_ready'] = userinfo.is_ready
                sidebar_data['color_scheme'] = userinfo.color_scheme
                map_data['citylat'] = userinfo.citylat
                map_data['citylng'] = userinfo.citylng
        os_path = os.path.dirname(__file__)
        self.response.out.write(template.render(os.path.join(os_path, 'templates/all_header.html'), {'key': constants.google_maps_apikey}))
        self.response.out.write(template.render(os.path.join(os_path, 'templates/private_welcome.html'), welcome_data))
        # Sidebar content depends on account state: cleared, authorized, or not.
        if user and userinfo:
            if userinfo.has_been_cleared:
                self.response.out.write(template.render(os.path.join(os_path, 'templates/information.html'), {'user': user, 'has_been_cleared': userinfo.has_been_cleared}))
            elif userinfo.is_authorized:
                self.response.out.write(template.render(os.path.join(os_path, 'templates/private_sidebar.html'), sidebar_data))
            else:
                self.response.out.write(template.render(os.path.join(os_path, 'templates/private_unauthorized.html'), None))
        else:
            self.response.out.write(template.render(os.path.join(os_path, 'templates/information.html'), {'user': user, 'has_been_cleared': False}))
        self.response.out.write(template.render(os.path.join(os_path, 'templates/private_map.html'), map_data))
        self.response.out.write(template.render(os.path.join(os_path, 'templates/all_footer.html'), None))
class InformationWriter(webapp.RequestHandler): #NOTE this defaults to the has_been_cleared case for now, since that's the only one that's used
    """Renders just the information template for the has_been_cleared case."""
    def get(self):
        user = users.get_current_user()
        os_path = os.path.dirname(__file__)
        self.response.out.write(template.render(os.path.join(os_path, 'templates/information.html'), {'user': user, 'has_been_cleared': True}))
class AuthHandler(webapp.RequestHandler):
    """Handles the Foursquare OAuth round trip and initial data import."""
    def _get_new_fs_and_credentials(self):
        # Second tuple element kept for backward compatibility with older
        # OAuth1-style call sites; always None here.
        consumer_key, oauth_secret, url = constants.get_oauth_strings()
        fs = foursquare.FoursquareAuthHelper(key=consumer_key, secret=oauth_secret, redirect_uri=url)
        return fs, None
    def get(self):
        user = users.get_current_user()
        if user:
            code = self.request.get("code")
            if code:
                # OAuth callback: replace any previous profile for this user.
                old_userinfos = UserInfo.all().filter('user =', user).fetch(500)
                db.delete(old_userinfos)
                fs, credentials = self._get_new_fs_and_credentials()
                try:
                    user_token = fs.get_access_token(code)
                    userinfo = UserInfo(user = user, token = user_token, secret = None, is_ready=False, is_authorized=True, level_max=int(3 * constants.level_const))
                except DownloadError, err:
                    if str(err).find('ApplicationError: 5') >= 0:
                        pass # if something bad happens on OAuth, then it currently just redirects to the signup page
                        #TODO find a better way to handle this case, but it's not clear there is a simple way to do it without messing up a bunch of code
                    else:
                        raise err
                try:
                    # Pull the profile and the first batch of checkins.
                    manage_foursquare_data.update_user_info(userinfo)
                    manage_foursquare_data.fetch_and_store_checkins_next(userinfo, limit=50)
                except foursquare.FoursquareRemoteException, err:
                    if str(err).find('403 Forbidden') >= 0:
                        pass # if a user tries to sign up while my app is blocked, then it currently just redirects to the signup page
                        #TODO find a better way to handle this case, but it's not clear there is a simple way to do it without messing up a bunch of code
                    else:
                        raise err
                except DownloadError:
                    pass #TODO make this better, but I'd rather throw the user back to the main page to try again than show the user an error.
                self.redirect("/")
            else:
                # No code yet: start the OAuth dance at Foursquare.
                fs, credentials = self._get_new_fs_and_credentials()
                self.redirect(fs.get_authentication_url())
        else:
            self.redirect(users.create_login_url(self.request.uri))
class StaticMapHandler(webapp.RequestHandler):
    """Serves a stored map image for URLs of the form /map/<key>.png."""
    def get(self):
        path = environ['PATH_INFO']
        if path.endswith('.png'):
            raw = path[:-4] # strip extension
            try:
                # Expected shape: /map/<datastore key>
                assert raw.count('/') == 2, "%d /'s" % raw.count('/')
                foo, bar, map_key = raw.split('/')
            except AssertionError, err:
                logging.error(err.args[0])
                return
        else:
            logging.error("Invalid path: " + path)
            return
        mapimage = convert_map_key(map_key)
        if mapimage:
            self.response.headers['Content-Type'] = 'image/png'
            self.response.out.write(mapimage.img)
        else:
            self.redirect("/")
class TileHandler(webapp.RequestHandler):
    """Renders one heatmap tile for URLs like /tile/<layer>/<zoom>/<y>,<x>.png."""
    def get(self):
        user = users.get_current_user()
        if user:
            path = environ['PATH_INFO']
            if path.endswith('.png'):
                raw = path[:-4] # strip extension
                try:
                    assert raw.count('/') == 4, "%d /'s" % raw.count('/')
                    foo, bar, layer, zoom, yx = raw.split('/') #tile is ignored, is just here to prevent caching
                    assert yx.count(',') == 1, "%d /'s" % yx.count(',')
                    y, x = yx.split(',')
                    assert zoom.isdigit() and x.isdigit() and y.isdigit(), "not digits"
                    zoom = int(zoom)
                    x = int(x)
                    y = int(y)
                    assert constants.min_zoom <= zoom <= constants.max_zoom, "bad zoom: %d" % zoom
                except AssertionError, err:
                    logging.error(err.args[0])
                    # NOTE(review): respondError is not defined in this class —
                    # confirm it exists on a base class or elsewhere in the file.
                    self.respondError(err)
                    return
            else:
                self.respondError("Invalid path")
                return
            start = datetime.now()
            try:
                new_tile = tile.GoogleTile(user, zoom, x, y)
                img_data = new_tile.image_out()
                self.response.headers['Content-Type'] = "image/png"
                self.response.out.write(img_data)
            except DeadlineExceededError, err:
                # Ran out of request time: log and return an empty body.
                logging.warning('%s error - started at %s, failed at %s' % (str(err), start, datetime.now()))
                self.response.headers['Content-Type'] = "image/png"
                self.response.out.write('')
class PublicPageHandler(webapp.RequestHandler):
    """Renders the public, shareable page for a map at /public/<key>.html."""
    def get(self):
        path = environ['PATH_INFO']
        if path.endswith('.html'):
            raw = path[:-5] # strip extension
            try:
                assert raw.count('/') == 2, "%d /'s" % raw.count('/')
                foo, bar, map_key = raw.split('/')
            except AssertionError, err:
                logging.error(err.args[0])
                return
        else:
            logging.error("Invalid path: " + path)
            return
        mapimage = convert_map_key(map_key)
        if mapimage:
            welcome_data = {
                'real_name': '',
                'photo_url': constants.default_photo,
            }
            sidebar_data = {
                'domain': environ['HTTP_HOST'],
                'public_url': 'public/%s.html' % mapimage.key(),
            }
            map_data = {
                'domain': environ['HTTP_HOST'],
                'static_url': mapimage.static_url,
                'mapimage_url': 'map/%s.png' % mapimage.key(),
            }
            # Show the map owner's name and photo if their profile still exists.
            userinfo = UserInfo.all().filter('user =', mapimage.user).get()
            if userinfo:
                welcome_data['real_name'] = userinfo.real_name
                welcome_data['photo_url'] = userinfo.photo_url
                #welcome_data['checkin_count'] = userinfo.checkin_count
            os_path = os.path.dirname(__file__)
            self.response.out.write(template.render(os.path.join(os_path, 'templates/all_header.html'), None))
            self.response.out.write(template.render(os.path.join(os_path, 'templates/public_welcome.html'), welcome_data))
            self.response.out.write(template.render(os.path.join(os_path, 'templates/public_sidebar.html'), sidebar_data))
            self.response.out.write(template.render(os.path.join(os_path, 'templates/public_map.html'), map_data))
            self.response.out.write(template.render(os.path.join(os_path, 'templates/all_footer.html'), None))
        else:
            self.redirect("/")
class StaticMapHtmlWriter(webapp.RequestHandler):
    """Renders the static-map HTML snippet for the current user's map."""
    def get(self):
        user = users.get_current_user()
        if user:
            mapimage = MapImage.all().filter('user =', user).get()
            if mapimage:
                template_data = {
                    'domain': environ['HTTP_HOST'],
                    'static_url': mapimage.static_url,
                    'mapimage_url': 'map/%s.png' % mapimage.key(),
                    'public_url': 'public/%s.html' % mapimage.key(),
                    # Cache-buster appended by the template.
                    'timestamp': str(time.time())
                }
                os_path = os.path.dirname(__file__)
                self.response.out.write(template.render(os.path.join(os_path, 'templates/static_map.html'), template_data))
            else:
                self.response.out.write("")
class UserReadyEndpoint(webapp.RequestHandler):
    """AJAX endpoint: reports cleared/ready state and checkin count as CSV text."""
    def get(self):
        user = users.get_current_user()
        if user:
            userinfo = UserInfo.all().filter('user =', user).get()
            if userinfo:
                self.response.out.write(str(userinfo.has_been_cleared) + ',' + str(userinfo.is_ready) + ',' + str(userinfo.checkin_count))
                return
        self.response.out.write('error')
class MapDoneEndpoint(webapp.RequestHandler):
    """AJAX endpoint: 'True' once all tiles of the user's map are rendered."""
    def get(self):
        user = users.get_current_user()
        if user:
            mapimage = MapImage.all().filter('user =', user).get()
            if mapimage:
                self.response.out.write(str(mapimage.tiles_remaining == 0))
                return
        self.response.out.write('error')
def convert_map_key(map_key):
    """Fetch a MapImage by key, translating keys from the pre-HRD app id."""
    try:
        return db.get(map_key)
    except BadRequestError, err:
        # Keys minted under the old 'where-do-you-go' app id cannot be read by
        # the HRD app directly; rebuild the key from its kind and numeric id.
        if err.message == 'app s~where-do-you-go-hrd cannot access app where-do-you-go\'s data':
            old_key = db.Key(map_key)
            new_key = db.Key.from_path(old_key.kind(), old_key.id())
            return db.get(new_key)
        else:
            raise BadRequestError, err
def main():
    """Map URL routes to handlers and start the WSGI application."""
    application = webapp.WSGIApplication([('/', IndexHandler),
                                          ('/go_to_foursquare', AuthHandler),
                                          ('/authenticated', AuthHandler),
                                          ('/tile/.*', TileHandler),
                                          ('/map/.*', StaticMapHandler),
                                          ('/public/.*', PublicPageHandler),
                                          ('/information', InformationWriter),
                                          ('/static_map_html', StaticMapHtmlWriter),
                                          ('/user_is_ready/.*', UserReadyEndpoint),
                                          ('/map_is_done/', MapDoneEndpoint)],
                                         debug=True)
    # Tile rendering reads its data through this provider.
    constants.provider = provider.DBProvider()
    run_wsgi_app(application)
if __name__ == '__main__':
    main()
|
from django.shortcuts import render, redirect, reverse
from django.http import HttpResponse
from django.views import generic
from .models import Settlement
class IndexView(generic.ListView):
    """Lists all settlements ordered by primary key."""
    template_name = 'settlement/index.html'
    context_object_name = 'settlement_list'
    def get_queryset(self):
        return Settlement.objects.order_by('id')
class DetailView(generic.DetailView):
    """Read-only view of a single settlement."""
    model = Settlement
    template_name = 'settlement/detail.html'
class EditView(generic.UpdateView):
    """Edit form for an existing settlement."""
    model = Settlement
    template_name = 'settlement/edit.html'
    fields = ['name', 'population', 'category',
              'location', 'knownfor', 'economy']
class NewView(generic.CreateView):
    """Creation form for a new settlement."""
    model = Settlement
    template_name = 'settlement/new.html'
    fields = [
        'name', 'population', 'category', 'location', 'knownfor', 'economy']
|
# coding:utf-8
# 2019-1-25
# Commonly used sklearn regression estimators
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge, RidgeCV, RANSACRegressor
from sklearn.linear_model import Lasso
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import ElasticNet, SGDRegressor, BayesianRidge
from sklearn.cross_decomposition import PLSCanonical
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor, AdaBoostRegressor, BaggingRegressor
from sklearn.svm import SVR, LinearSVR
from sklearn.kernel_ridge import KernelRidge
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from xgboost import XGBRegressor
# One default-configured instance of each regressor under comparison; the
# trailing comments link to the corresponding reference documentation.
models = [LinearRegression(),
          Ridge(), # http://www.cnblogs.com/pinard/p/6023000.html
          Lasso(alpha=0.01, max_iter=10000), # https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Lasso.html
          RandomForestRegressor(), # https://scikit-learn.org/dev/modules/generated/sklearn.ensemble.RandomForestRegressor.html
          GradientBoostingRegressor(), # https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html
          SVR(), # https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVR.html#sklearn.svm.SVR
          LinearSVR(), # https://scikit-learn.org/stable/modules/generated/sklearn.svm.LinearSVR.html
          ElasticNet(alpha=0.001, max_iter=10000), # https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.ElasticNet.html
          SGDRegressor(max_iter=10000, tol=1e-3), # https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDRegressor.html
          BayesianRidge(), #
          KernelRidge(alpha=0.6, kernel='polynomial', degree=2, coef0=2.5), # https://scikit-learn.org/stable/modules/generated/sklearn.kernel_ridge.KernelRidge.html
          ExtraTreesRegressor(), # https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesRegressor.html
          XGBRegressor(),
          AdaBoostRegressor(n_estimators=50), # https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.AdaBoostRegressor.html
          BaggingRegressor(), # https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.BaggingRegressor.html
          DecisionTreeRegressor(), #https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html
          KNeighborsRegressor()] # https://scikit-learn.org/0.18/modules/generated/sklearn.neighbors.KNeighborsRegressor.html
# Print the class name of every instantiated model.
for m in models:
    print("- {}".format(m.__class__.__name__))
|
import argparse
import os
import shlex
import tempfile
import pytest
import west.cmd.project
# Manifest describing the projects used by the tests below.
MANIFEST_PATH = os.path.abspath(
    os.path.join(os.path.dirname(__file__), 'manifest.yml')
)
# Where the projects are cloned to
NET_TOOLS_PATH = 'net-tools'
KCONFIGLIB_PATH = 'sub/kconfiglib'
# One instance of every west command under test; cmd() dispatches by .name.
COMMAND_OBJECTS = (
    west.cmd.project.ListProjects(),
    west.cmd.project.Fetch(),
    west.cmd.project.Pull(),
    west.cmd.project.Rebase(),
    west.cmd.project.Branch(),
    west.cmd.project.Checkout(),
    west.cmd.project.Diff(),
    west.cmd.project.Status(),
    west.cmd.project.ForAll(),
)
def cmd(cmd):
    """Run a west command given as a single shell-style string.

    Appends the test manifest (-m), locates the matching WestCommand in
    COMMAND_OBJECTS by name, and invokes it with the parsed arguments.
    Fails the test if the command name is unknown.
    """
    cmd += ' -m ' + MANIFEST_PATH
    # cmd() takes the command as a string, which is less clunky to work with.
    # Split it according to shell rules.
    split_cmd = shlex.split(cmd)
    command_name = split_cmd[0]
    for command_object in COMMAND_OBJECTS:
        # Find the WestCommand object that implements the command
        if command_object.name == command_name:
            # Use it to parse the arguments
            parser = argparse.ArgumentParser()
            command_object.do_add_parser(parser.add_subparsers())
            # Pass the parsed arguments and unknown arguments to run it.
            # (Reuse split_cmd instead of re-splitting the same string.)
            command_object.do_run(*parser.parse_known_args(split_cmd))
            break
    else:
        assert False, "unknown command " + command_name
@pytest.fixture
def clean_west_topdir(tmpdir):
    # Create an empty west installation marker and run the test from there.
    tmpdir.mkdir('.west')
    tmpdir.chdir()
def test_list_projects(clean_west_topdir):
    # TODO: Check output
    cmd('list-projects')
def test_fetch(clean_west_topdir):
    """fetch clones all projects, rejects unknown ones, updates named ones."""
    # Clone all projects
    cmd('fetch')
    # Check that they got cloned
    assert os.path.isdir(NET_TOOLS_PATH)
    assert os.path.isdir(KCONFIGLIB_PATH)
    # Non-existent project
    with pytest.raises(SystemExit):
        cmd('fetch non-existent')
    # Update a specific project
    cmd('fetch net-tools')
def test_pull(clean_west_topdir):
    """pull behaves like fetch for cloning and project selection."""
    # Clone all projects
    cmd('pull')
    # Check that they got cloned
    assert os.path.isdir(NET_TOOLS_PATH)
    assert os.path.isdir(KCONFIGLIB_PATH)
    # Non-existent project
    with pytest.raises(SystemExit):
        cmd('pull non-existent')
    # Update a specific project
    cmd('pull net-tools')
def test_rebase(clean_west_topdir):
    """rebase skips non-cloned projects and rebases cloned ones."""
    # Clone just one project
    cmd('fetch net-tools')
    # Piggyback a check that just that project got cloned
    assert not os.path.exists(KCONFIGLIB_PATH)
    # Rebase the project (non-cloned project should be silently skipped)
    cmd('rebase')
    # Rebase the project again, naming it explicitly
    cmd('rebase net-tools')
    # Try rebasing a project that hasn't been cloned
    # NOTE(review): 'pull rebase Kconfiglib' runs *pull* with 'rebase' as an
    # argument — possibly meant cmd('rebase Kconfiglib'); confirm intent.
    with pytest.raises(SystemExit):
        cmd('pull rebase Kconfiglib')
    # Clone the other project
    cmd('pull Kconfiglib')
    # Will rebase both projects now
    cmd('rebase')
def test_branches(clean_west_topdir):
    """branch/checkout create and switch branches per project."""
    # Missing branch name
    with pytest.raises(SystemExit):
        cmd('checkout')
    # Clone just one project
    cmd('fetch net-tools')
    # Create a branch in the cloned project
    cmd('branch foo')
    # Check out the branch
    cmd('checkout foo')
    # Check out the branch again, naming the project explicitly
    cmd('checkout foo net-tools')
    # Try checking out a branch that doesn't exist in any project
    with pytest.raises(SystemExit):
        cmd('checkout nonexistent')
    # Try checking out a branch in a non-cloned project
    with pytest.raises(SystemExit):
        cmd('checkout foo Kconfiglib')
    # Clone the other project
    cmd('fetch Kconfiglib')
    # It still doesn't have the branch
    with pytest.raises(SystemExit):
        cmd('checkout foo Kconfiglib')
    # Create a differently-named branch it
    cmd('branch bar Kconfiglib')
    # That branch shouldn't exist in the other project
    with pytest.raises(SystemExit):
        cmd('checkout bar net-tools')
    # It should be possible to check out each branch even though they only
    # exists in one project
    cmd('checkout foo')
    cmd('checkout bar')
    # List all branches and the projects they appear in (TODO: Check output)
    cmd('branch')
def test_diff(clean_west_topdir):
    # TODO: Check output
    # Diff with no projects cloned shouldn't fail
    cmd('diff')
    # Neither should it fail after fetching one or both projects
    cmd('fetch net-tools')
    cmd('diff')
    cmd('fetch Kconfiglib')
    cmd('diff --cached') # Pass a custom flag too
def test_status(clean_west_topdir):
    # TODO: Check output
    # Status with no projects cloned shouldn't fail
    cmd('status')
    # Neither should it fail after fetching one or both projects
    cmd('fetch net-tools')
    cmd('status')
    cmd('fetch Kconfiglib')
    cmd('status --long') # Pass a custom flag too
def test_forall(clean_west_topdir):
    # TODO: Check output
    # The 'echo' command is available in both 'shell' and 'batch'
    # 'forall' with no projects cloned shouldn't fail
    cmd("forall -c 'echo *'")
    # Neither should it fail after fetching one or both projects
    cmd('fetch net-tools')
    cmd("forall -c 'echo *'")
    cmd('fetch Kconfiglib')
    cmd("forall -c 'echo *'")
|
import logging
import unittest
from io import BytesIO
from datetime import datetime
from goodline_iptv.jtvfile import JtvFile
from goodline_iptv.xmltv import XmltvBuilder
from goodline_iptv.test.jtv_data import TV_ZIP
from teamcity import is_running_under_teamcity
from teamcity.unittestpy import TeamcityTestRunner
# Route all records to a no-op handler so the code under test can log
# without producing output during the test run.
logging.basicConfig(
    handlers=[
        logging.NullHandler()
    ]
)
# Root logger instance passed into JtvFile below.
log = logging.getLogger()
class TestJtv(unittest.TestCase):
    """Tests for JtvFile parsing using the embedded TV_ZIP fixture."""

    def setUp(self):
        # Materialize the channels generator so tests can index into it.
        self.channels = list(JtvFile(BytesIO(TV_ZIP), log).channels)

    def test_channelCount(self):
        # The fixture is expected to contain exactly one channel.
        self.assertEqual(len(self.channels), 1)

    def test_channelId(self):
        self.assertEqual(self.channels[0].track_id, '0')

    def test_tracks(self):
        # Each track is a (start_datetime, title) pair; spot-check the first
        # and last entries of the fixture's programme listing.
        tracks = list(self.channels[0].tracks)
        self.assertEqual(len(tracks), 166)
        self.assertEqual(tracks[0][0], datetime(2019, 3, 4, 5, 0))
        self.assertEqual(tracks[0][1], 'Телеканал "Доброе утро"')
        self.assertEqual(tracks[165][0], datetime(2019, 3, 11, 4, 5))
        self.assertEqual(tracks[165][1], 'Давай поженимся! (16+)')
class TestXmltv(unittest.TestCase):
    """Tests that XmltvBuilder serializes channels and programmes to XMLTV."""

    def setUp(self):
        # Build one channel with four programmes; test_build compares the
        # serialized output against the exact expected XMLTV document.
        self.xml = XmltvBuilder()
        self.xml.add_channel('0', '1 канал, канает и будет канать', '1channel.png')
        self.xml.add_track('0', datetime(2019, 3, 4, 5, 0), datetime(2019, 3, 11, 5, 15), 'Утренняя зарядка воды с Аланом Чумаком')
        self.xml.add_track('0', datetime(2019, 3, 4, 5, 20), datetime(2019, 3, 11, 6, 20), 'Чушь с Владимиром Познером')
        self.xml.add_track('0', datetime(2019, 3, 4, 6, 30), datetime(2019, 3, 11, 7, 15), 'Православный приговор')
        self.xml.add_track('0', datetime(2019, 3, 4, 7, 20), datetime(2019, 3, 11, 23, 0), 'Очередное Ток-шоу "Что там у хохлов"')

    def test_build(self):
        # NOTE: timestamps carry a fixed "+0700" offset — presumably the
        # builder's hard-coded timezone; confirm against XmltvBuilder.
        result = ('<?xml version="1.0" ?>\n'
                  '<tv generator-info-name="Preved" generator-info-url="http://www.medved.info">\n'
                  '  <channel id="0">\n'
                  '    <display-name>1 канал, канает и будет канать</display-name>\n'
                  '    <icon>1channel.png</icon>\n'
                  '  </channel>\n'
                  '  <programme channel="0" start="20190304050000 +0700" stop="20190311051500 +0700">\n'
                  '    <title lang="ru">Утренняя зарядка воды с Аланом Чумаком</title>\n'
                  '  </programme>\n'
                  '  <programme channel="0" start="20190304052000 +0700" stop="20190311062000 +0700">\n'
                  '    <title lang="ru">Чушь с Владимиром Познером</title>\n'
                  '  </programme>\n'
                  '  <programme channel="0" start="20190304063000 +0700" stop="20190311071500 +0700">\n'
                  '    <title lang="ru">Православный приговор</title>\n'
                  '  </programme>\n'
                  '  <programme channel="0" start="20190304072000 +0700" stop="20190311230000 +0700">\n'
                  '    <title lang="ru">Очередное Ток-шоу "Что там у хохлов"</title>\n'
                  '  </programme>\n'
                  '</tv>\n')
        self.assertEqual(self.xml.to_string(), result)
if __name__ == '__main__':
    # Use the TeamCity-aware runner under a TeamCity build agent so results
    # are reported in its service-message format; plain text runner otherwise.
    if is_running_under_teamcity():
        runner = TeamcityTestRunner()
    else:
        runner = unittest.TextTestRunner()
    unittest.main(testRunner=runner, verbosity=2)
|
import sys
import os
from tkinter import Button
from PyQt6.QtGui import *
from PyQt6.QtWidgets import *
from PyQt6.QtCore import *
class MainWindow(QMainWindow):
    """Fixed-size main window containing a single demo push button."""

    def __init__(self):
        super().__init__()
        self.setWindowTitle('Célia Cachos')
        self.setWindowIcon(QIcon('imagens/logo.png'))
        self.setFixedSize(QSize(1000,500))  # non-resizable 1000x500 window
        btn = QPushButton("Button")  # create the button
        btn.setFixedSize(QSize(100,100))  # fixed 100x100 px button
        # setCheckable makes the button a toggle: the clicked signal now
        # carries a checked state (the original comment said "clickable",
        # which is wrong — buttons are always clickable).
        btn.setCheckable(True)
        btn.clicked.connect(self.the_button_was_clicked)  # fires on every click
        btn.clicked.connect(self.the_button_was_toggled)  # same signal; slot receives the checked bool
        self.setCentralWidget(btn)  # button fills the central area

    def the_button_was_clicked(self):
        """Slot invoked on every click, ignoring the checked state."""
        print("Clicked!")

    def the_button_was_toggled(self, checked):
        """Slot invoked on every click with the button's current toggle state."""
        print("Checked?", checked)
# Application bootstrap: one QApplication per process, then enter the Qt
# event loop until the last window is closed.
app = QApplication(sys.argv)
main_window = MainWindow()
main_window.show()
app.exec()
|
import click
import codecs
import csv
import numpy as np
import os.path
import glob
from music21 import corpus
from sklearn.decomposition import PCA
from collections import defaultdict, Counter
from datasets import build_vocabulary
from constants import *
@click.group()
def score():
    """Tools for scoring MusicXML outputs."""
    # Group entry point only; subcommands are registered at module bottom.
    pass
# NOTE(review): the default `open(...)` runs at import time and leaves a file
# handle open even when the command is never invoked — consider a lazy default.
@click.command()
@click.option('-o', '--out-file', type=click.File('wb'), default=open(SCRATCH_DIR + '/harm_results.csv', 'wb'))
@click.pass_context
def harm_error_rate(ctx, out_file):
    """Aggregate token (TER) and frame (FER) error rates over harmonization
    outputs, grouped by which parts were masked, and write them as CSV."""
    num_correct = defaultdict(Counter)
    num_total = defaultdict(Counter)
    error_types = ['TER', 'FER']
    # Only the first 30 masked outputs are scored (presumably to bound runtime).
    for output in glob.glob(os.path.join(SCRATCH_DIR, 'harm_out', '*-mask-*.utf'))[:30]:
        harm_fname = os.path.basename(output)
        orig_fname, parts_mask = extract_BWV(harm_fname)
        masked = os.path.join(SCRATCH_DIR, harm_fname)
        reference = os.path.join(SCRATCH_DIR, orig_fname)
        # Delegate per-file scoring; returns ((tok_correct, tok_total),
        # (frame_correct, frame_total)) matching error_types order.
        error_rates = ctx.invoke(
            harm_error_rate_single,
            output=output, masked=masked, reference=reference)
        for error_type, (n_correct, n_total) in zip(error_types, error_rates):
            num_correct[error_type][parts_mask] += n_correct
            num_total[error_type][parts_mask] += n_total
    # One CSV row per (error type, parts mask): accuracy ratio in [0, 1].
    outwriter = csv.writer(out_file)
    for t in error_types:
        for parts_mask in num_correct[t]:
            outwriter.writerow((t, parts_mask, float(num_correct[t][parts_mask]) / num_total[t][parts_mask]))
    # Python 2 print statement — this module is not Python 3 compatible.
    print 'Wrote results to {}'.format(out_file.name)
def extract_BWV(fname):
    """Split a masked-output filename into its reference name and mask code.

    Example: 'bwv1-mask-soprano-alto-fermatas.utf' ->
    ('bwv1-nomask-fermatas.utf', 'sa') — the reference filename replaces the
    mask suffix with '-nomask-fermatas.utf', and the mask code is the first
    letter of each masked part name.
    """
    pieces = fname.split('-')
    mask_pos = pieces.index('mask')
    reference_fname = '-'.join(pieces[:mask_pos]) + '-nomask-fermatas.utf'
    mask_code = ''.join(part[0] for part in pieces[mask_pos + 1:-1])
    return reference_fname, mask_code
@click.command()
@click.argument('output', required=True, type=click.Path(exists=True))
@click.argument('masked', required=True, type=click.Path(exists=True))
@click.argument('reference', required=True, type=click.Path(exists=True))
def harm_error_rate_single(output, masked, reference):
    """Score one harmonization output against its reference.

    Returns ((n_tok_correct, n_tok_total), (n_frame_correct, n_frame_total)):
    token accuracy counts only positions the model predicted (where the
    masked file has BLANK_MASK_UTF); frame accuracy compares whole chords
    delimited by CHORD_BOUNDARY_DELIM / END_DELIM.
    """
    txt_to_utf, _= build_vocabulary()
    n_tok_correct = 0
    n_tok_total = 0
    n_frame_correct = 0
    n_frame_total = 0
    # NOTE(review): file handles are never closed; fine for a one-shot CLI,
    # but `with codecs.open(...)` would be cleaner.
    out_fd = codecs.open(output, 'r', 'utf8').read()
    masked_fd = codecs.open(masked, 'r', 'utf8').read()
    ref_fd = codecs.open(reference, 'r', 'utf8').read()
    frame_buffer_out = u''
    frame_buffer_ref = u''
    # Walk the three files character-aligned; zip stops at the shortest.
    for o, (m, r) in zip(out_fd, zip(masked_fd, ref_fd)):
        if r in [txt_to_utf[CHORD_BOUNDARY_DELIM], END_DELIM] and len(frame_buffer_out) > 0: # new frame
            n_frame_total += 1
            if frame_buffer_ref == frame_buffer_out:
                n_frame_correct += 1
            frame_buffer_out = u''
            frame_buffer_ref = u''
        elif m == BLANK_MASK_UTF: # the model is making a prediction
            n_tok_total += 1
            frame_buffer_out += o
            frame_buffer_ref += r
            if o == r:
                n_tok_correct += 1
    return (n_tok_correct, n_tok_total), (n_frame_correct, n_frame_total)
@click.command()
@click.option('-i', '--input-path', required=True, type=click.File('rb'))
def pca_metric(input_path):
    """Computes distance to Bach centroid in 2D PCA.

    Loads (computing and caching on first use) the principal components of
    note histograms over the corpus, then — eventually — will project the
    input file and measure its distance to the Bach cluster centroid.
    """
    out_fp = SCRATCH_DIR + '/score-compute_PCs.npy'
    if not os.path.exists(out_fp):
        _compute_PCs(out_fp)
    # BUG FIX: the original called np.fromfiile, which does not exist and
    # raised AttributeError; the cached components are read with np.fromfile.
    PCs = np.fromfile(out_fp)
    print(PCs)
    # TODO: find centroid of Bach cluster
    # TODO: compute and return euclidean distance of PC projection of input_path file to Bach centroid
def _compute_PCs(out_fp):
    """Computes pitch class per measure principal components.

    Builds MIDI-pitch histograms (pitches 30..99) for two corpus subsets,
    normalizes each piece's histogram, fits a 2-component PCA on the stacked
    rows, and caches the components to *out_fp* via ndarray.tofile.

    NOTE(review): this function relies on Python 2 semantics throughout
    (print statements, list-returning dict.values()); under Python 3 the
    np.array([...].values()) calls would build object arrays and fail.
    """
    bachBundle = corpus.search('bwv')
    bachBundle = bachBundle.search('4/4')
    # NOTE: we should refactor this into a separate helper function: music21 Stream -> Pitch class histogram
    index =0
    data = {}
    # Pre-fill a zero count for every MIDI pitch in [30, 100) per piece.
    for n in range(len(bachBundle)):
        data[n] = {}
        for i in range(30,100):
            data[n][i] = 0
    for n in range(len(bachBundle)):
        myPiece = bachBundle[n].parse()
        for m in myPiece.flat.getElementsByClass('Note'):
            data[n][m.midi] +=1
        print 'Number %i' % n
    # NOTE(review): new_data is seeded with piece 0 and the loop below adds
    # index 0 again, so the first row appears twice — confirm intended.
    new_data = np.array([data[0].values()]).astype(np.float64)
    new_data /= np.sum(new_data)
    for index in range(len(bachBundle)):
        temp = np.array([data[index].values()]).astype(np.float64)
        temp /= np.sum(temp)
        new_data = np.concatenate((new_data, temp) , axis=0)
    print 'Statistics gathered!'
    save = new_data
    ###############################################################################
    # Second pass over a different subset for contrast with the Bach rows.
    # NOTE(review): this rebinds bachBundle to the whole corpus *module*
    # before searching — likely meant to search the entire corpus; confirm.
    bachBundle = corpus
    bachBundle = bachBundle.search('4/4')
    index =0
    data = {}
    for n in range(700, 2500):
        data[n] = {}
        for i in range(30,100):
            data[n][i] = 0
    for n in range(700, 2500):
        myPiece = bachBundle[n].parse()
        for m in myPiece.flat.getElementsByClass('Note'):
            data[n][m.midi] +=1
        print 'Number %i' % n
    # NOTE(review): unlike the first pass, this seed row is not cast to
    # float64 before the in-place division — confirm no integer truncation.
    new_data = np.array([data[700].values()])
    new_data /= np.sum(new_data)
    for index in range(700, 2500):
        temp = np.array([data[index].values()]).astype(np.float64)
        temp /= np.sum(temp)
        new_data = np.concatenate( (new_data, temp ) , axis=0)
    print 'Statistics gathered!'
    X = new_data
    d = np.concatenate((save,X))
    n_components=2
    pca = PCA(n_components=n_components).fit(d)
    # Raw binary dump; the reader must know shape/dtype (np.fromfile).
    pca.components_.tofile(out_fp)
    print '{} PCs written to {}'.format(n_components, out_fp)
# Register the subcommands with the `score` click group.
# BUG FIX: a bare `map(...)` only executes eagerly on Python 2; under
# Python 3 map is lazy and the commands would never be registered. An
# explicit loop works identically on both.
for _command in (pca_metric, harm_error_rate):
    score.add_command(_command)
|
# An example cipher suite for secure on-board communication. This is
# in no way cryptographically secure. DO NOT USE IN THE REAL WORLD!
from cantools.database import Message
from typing import Union
class SnakeOilAuthenticator:
    """A snake oil authenticator for secure on-board communication.

    Demonstration-only MAC generator for showing how SecOC can be wired up
    with cantools. The "algorithm" is a toy XOR scheme and offers no real
    security whatsoever — DO NOT USE IN THE REAL WORLD!
    """

    def __init__(self,
                 secret: Union[str, bytes, bytearray]):
        # Normalize the secret to raw bytes regardless of input type.
        self._secret = secret.encode() if isinstance(secret, str) else bytes(secret)

    def __call__(self,
                 dbmsg: Message,
                 auth_data: bytearray,
                 freshness_value: int) \
            -> bytearray:
        """Return a 5-byte toy MAC over *auth_data* and the freshness value."""
        seed = freshness_value % 253
        # Fold every payload byte and the corresponding secret byte into a
        # fixed-width accumulator via XOR.
        mac = bytearray([seed] * 5)
        for i, data_byte in enumerate(auth_data):
            mac[i % 5] ^= data_byte ^ self._secret[i % len(self._secret)]
        return mac
|
import subprocess
import re
def download(build_url, local_path):
    """Download *build_url* to *local_path* via curl, retrying up to 3 times.

    Each attempt runs curl and then checks with `file` that the result looks
    like binary data. Returns True on success, False on failure or when
    *build_url* is empty, and None when the curl subprocess itself could not
    be launched.
    """
    print("Build-URL: {}".format(build_url))
    print("Local-PATH: {}".format(local_path))
    if not build_url:
        return False
    status = False
    curl_cmd = "curl --retry 15 -o {} {}".format(local_path, build_url)
    for count in range(3):
        # --- stage 1: fetch with curl ---
        retval = ""
        try:
            print("Download CURL CMD: '{}'".format(curl_cmd))
            # BUG FIX: universal_newlines=True makes communicate() return
            # str instead of bytes — the original concatenated bytes output
            # onto a str and raised TypeError on the success path.
            proc = subprocess.Popen(curl_cmd, stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE, shell=True,
                                    universal_newlines=True)
            out, err = proc.communicate()
            if proc.returncode != 0:
                retval = "Error: Failed to execute '{}' ('{}')\n".format(curl_cmd, err.strip())
            else:
                retval = out.strip() + err.strip()
        except Exception:
            print("Error: Exception occurred while executing the command '{}'".format(curl_cmd))
            return None
        if retval.strip() != "":
            print(retval)
        # curl reports failures as "curl: (<code>) ..." on stderr.
        if re.search(r"curl:\s+\(\d+\)", retval):
            errorline = [m for m in retval.split("\n") if re.search(r"curl:\s+\(\d+\)", m)]
            errorline = str("".join(errorline))
            msg = "Image download to host location failed using curl command. Error: '{}'"
            print(msg.format(errorline))
            if count < 2:
                continue
        # --- stage 2: sanity-check the downloaded file with `file` ---
        retval = ""
        filetype_cmd = "file {}".format(local_path)
        try:
            print("File CMD: '{}'".format(filetype_cmd))
            proc = subprocess.Popen(filetype_cmd, stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE, shell=True,
                                    universal_newlines=True)
            out, err = proc.communicate()
            if proc.returncode != 0:
                retval = "Error: Failed to execute '{}' ('{}')\n".format(filetype_cmd, err.strip())
            else:
                retval = out.strip() + err.strip()
        except Exception:
            # Best effort: if `file` cannot run at all, the check below
            # treats the attempt as failed and retries.
            pass
        # An image should be reported as raw "data" by `file`.
        if not re.search(r":\s+data", retval):
            print(retval)
            errorline = retval.split("\n")[0]
            msg = "Image downloaded to host location is not a proper image type. File type: '{}'"
            print(msg.format(errorline))
            if count < 2:
                continue
        else:
            status = True
            break
    return status
|
"""Cleanup unhosted and rogue pipe and duct insulation."""
from __future__ import print_function
import collections
import datetime
import itertools
import clr
clr.AddReference("RevitAPI")
clr.AddReference("RevitAPIUI")
import Autodesk.Revit.DB as db
import Autodesk.Revit.UI as ui
clr.AddReference("System.Windows.Forms")
import System.Windows.Forms as swf
__name = "InsulationCleanup.py"
__version = "1.0b"
CHECK = "✔"
ERROR = "✘"
def main():
    """Main Script.

    Scans the active Revit model for unhosted and "rogue" (wrong-workset)
    pipe/duct insulation, shows a summary dialog, then either writes a CSV
    report or cleans the model inside a single transaction. Returns a
    ui.Result status for the pyRevit host.
    """
    print("🐍 Running {fname} version {ver}...".format(fname=__name, ver=__version))
    # STEP 0: Setup — document of the active UI document (pyRevit __revit__).
    doc = __revit__.ActiveUIDocument.Document
    # STEP 1: Inspect Model and summarize findings
    pipe_insulations = query_all_elements_of_category(
        doc=doc, cat=db.BuiltInCategory.OST_PipeInsulations)
    duct_insulations = query_all_elements_of_category(
        doc=doc, cat=db.BuiltInCategory.OST_DuctInsulations)
    rogue_pipe, unhosted_pipe = find_rogue_and_unhosted_elements(
        doc=doc, elems=pipe_insulations)
    rogue_duct, unhosted_duct = find_rogue_and_unhosted_elements(
        doc=doc, elems=duct_insulations)
    summary_list = write_summary(
        tpipe=pipe_insulations, tduct=duct_insulations,  # totals
        upipe= unhosted_pipe, uduct=unhosted_duct,  # unhosted
        rpipe=rogue_pipe, rduct=rogue_duct)  # rogue
    summary_text = "\n".join(summary_list)
    print(summary_text)
    # STEP 2: Receive User Input — offer "report only" vs. "fix the model".
    dialog = ui.TaskDialog(title="Insulation Cleanup")
    dialog.MainInstruction = "Insulation Cleanup Summary"
    dialog.MainContent = summary_text
    dialog.AddCommandLink(ui.TaskDialogCommandLinkId.CommandLink1, "Write Report")
    dialog.AddCommandLink(ui.TaskDialogCommandLinkId.CommandLink2, "Clean Insulation")
    dialog.CommonButtons = ui.TaskDialogCommonButtons.Close
    dialog.DefaultButton = ui.TaskDialogResult.Close
    result = dialog.Show()
    # STEP 3: Write report or clean up insulation
    if result == ui.TaskDialogResult.CommandLink1:  # Write report
        save_dialog = swf.SaveFileDialog()
        save_dialog.Title = "Save Insulation Cleanup Report"
        save_dialog.Filter = "Text files|*.txt"
        save_dialog.FileName = "report.txt"
        if save_dialog.ShowDialog() == swf.DialogResult.OK:  # Save report
            file_path = save_dialog.FileName
            print("Writing report to {0}".format(file_path))
            # Binary mode with explicit \r\n keeps Windows line endings
            # independent of the interpreter's newline translation.
            with open(file_path, mode="wb") as fh:
                report = write_report(
                    doc, unhosted_pipe, rogue_pipe, unhosted_duct, rogue_duct)
                for line in report:
                    fh.write("{line}\r\n".format(line=line))
            print("✔\nDone. 😊")
            return ui.Result.Succeeded
        else:  # Don't save report
            print("🛈 File save dialog canceled.")
            return ui.Result.Cancelled
    elif result == ui.TaskDialogResult.CommandLink2:  # Clean Insulation
        # All model changes happen inside one transaction so any failure
        # rolls the model back to its original state.
        transaction = db.Transaction(doc)
        transaction.Start("{name} - v{ver}".format(name=__name, ver=__version))
        try:
            print("Cleaning Insulation...")
            for pipe_element in unhosted_pipe:
                doc.Delete(pipe_element.Id)
            print("Deleted {num} unhosted pipe insulation elements".format(
                num=len(unhosted_pipe)))
            for pipe_pair in rogue_pipe:
                cleanup_insulation(pipe_pair)
            print("Moved {num} rogue pipe insulation elements.".format(
                num=len(rogue_pipe)))
            for duct_element in unhosted_duct:
                doc.Delete(duct_element.Id)
            print("Deleted {num} unhosted duct insulation elements.".format(
                num=len(unhosted_duct)))
            for duct_pair in rogue_duct:
                cleanup_insulation(duct_pair)
            print("Moved {num} rogue duct insulation elements.".format(
                num=len(rogue_duct)))
        except Exception as exception:
            print("Failed.\nException:\n{ex}".format(ex=exception))
            transaction.RollBack()
            return ui.Result.Failed
        else:
            print("✔\nDone. 😊")
            transaction.Commit()
            return ui.Result.Succeeded
    else:
        print("Nothing to do.")
        return ui.Result.Cancelled
# Pairs a rogue insulation element with its hosting pipe/duct element.
ElementHostPair = collections.namedtuple("ElementHostPair", ["element", "host"])
def query_all_elements_of_category(doc, cat):
    """Return all elements of a category from a document.

    Instance elements only — element types are filtered out.
    """
    collector = db.FilteredElementCollector(doc)
    elements = collector.OfCategory(cat)\
                        .WhereElementIsNotElementType()\
                        .ToElements()
    return elements
def find_rogue_and_unhosted_elements(doc, elems):
    """Partition insulation elements into rogue and unhosted lists.

    An element is *unhosted* when its host id resolves to no element, and
    *rogue* when it lives on a different workset than its host. Returns
    (rogue_elements, unhosted_elements); rogue entries are ElementHostPair
    tuples so the host is available for cleanup.
    """
    unhosted = []
    rogue = []
    for insulation in elems:
        host = doc.GetElement(insulation.HostElementId)
        if host is None:
            unhosted.append(insulation)
        elif insulation.WorksetId != host.WorksetId:
            rogue.append(ElementHostPair(insulation, host))
    return rogue, unhosted
def cleanup_insulation(pair):
    """Cleanup rogue and unhosted insulation elements.

    Moves the insulation onto its host's workset by toggling the *host's*
    workset parameter: first to the insulation's workset, then back to the
    host's own workset.

    NOTE(review): insulation workset cannot be set directly; hosted
    insulation presumably follows its host, so the set-and-set-back below
    looks like a deliberate workaround rather than a bug — confirm before
    "simplifying" away the first Set() call.
    """
    element_workset_id = pair.element.WorksetId.IntegerValue
    host_workset_id = pair.host.WorksetId.IntegerValue
    host_workset_parameter = pair.host.get_Parameter(
        db.BuiltInParameter.ELEM_PARTITION_PARAM)
    host_workset_parameter.Set(element_workset_id)
    host_workset_parameter.Set(host_workset_id)
def write_summary(tpipe, tduct, upipe, rpipe, uduct, rduct):
    """Build the human-readable summary lines for pipe and duct insulation.

    For each kind, reports rogue count, unhosted count, and the total number
    of insulation elements in the model. Lines with a non-zero problem count
    are prefixed with ERROR, clean ones with CHECK.
    """
    summary = []
    for label, total, unhosted, rogue in (
            ("Pipe", tpipe, upipe, rpipe),
            ("Duct", tduct, uduct, rduct)):
        kind = label.lower()
        summary.append("{0} Insulation:".format(label))
        summary.append("{res} Found {num} rogue {kind} insulation elements.".format(
            num=len(rogue), res=ERROR if len(rogue) else CHECK, kind=kind))
        summary.append("{res} Found {num} unhosted {kind} insulation elements.".format(
            num=len(unhosted), res=ERROR if len(unhosted) else CHECK, kind=kind))
        summary.append("There is a total of {tot} {kind} insulation elements in the model.".format(
            tot=len(total), kind=kind))
    return summary
def write_report(doc, upipe, rpipe, uduct, rduct):
    """Write report of rogue and unhosted insulation elements.

    Returns a list of CSV-ish lines: a small header with counts, then one
    row per rogue element (with host info) and one row per unhosted element
    (host columns dashed out). The caller is responsible for writing the
    lines to disk.
    """
    workset_table = doc.GetWorksetTable()
    rogue = len(rpipe) + len(rduct)
    unhosted = len(upipe) + len(uduct)
    report = []
    # write header with general information
    report.append("reporting time, {now}".format(now=datetime.datetime.now()))
    report.append("rogue elements, {num}".format(num=rogue))
    report.append("unhosted elements, {num}".format(num=unhosted))
    report.append("---")
    # define csv structure template and header line
    line_template = "{idx},{eid},'{en}','{ews}',{hid},'{hn}','{hws}'"
    report.append(
        "index,element id,element name,element workset,host id,host name,host workset")
    # write rogue element data (pipe rows first, then duct):
    for idx, pair in enumerate(itertools.chain(rpipe, rduct), start=1):
        elem, host = pair
        elem_workset = workset_table.GetWorkset(elem.WorksetId)
        host_workset = workset_table.GetWorkset(host.WorksetId)
        line = line_template.format(
            idx=idx,
            eid=elem.Id.IntegerValue, en=elem.Name, ews=elem_workset.Name,
            hid=host.Id.IntegerValue, hn=host.Name, hws=host_workset.Name)
        report.append(line)
    # write unhosted element data (index restarts at 1 for this section):
    for idx, elem in enumerate(itertools.chain(upipe, uduct), start=1):
        elem_workset = workset_table.GetWorkset(elem.WorksetId)
        line = line_template.format(
            idx=idx,
            eid=elem.Id.IntegerValue, en=elem.Name, ews=elem_workset.Name,
            hid="-", hn="-", hws="-")
        report.append(line)
    return report
if __name__ == "__main__":
    #__window__.Hide()
    # Close the pyRevit output window only on success so the user can read
    # the log when something went wrong or was cancelled.
    result = main()
    if result == ui.Result.Succeeded:
        __window__.Close()
|
'''
Data types
str   - string - text 'Like this' or "Like this"
int   - integer - 12345 0 -9 10 -11
float - real/floating point - 16.65 -10.6
bool  - boolean/logical - True/False
'''
# Print sample values alongside their types.
print('- Isaac', type('Luiz'))
print('-', 10, type(10))
print('-', 23.25, type(23.25))
print('-', 10==10, type(10==10))
print('-', 'L'=='l', type('L'=='l'))
# type conversion
print('--'*15)
print('luiz', type('luiz'), bool('luiz'))  # non-empty strings are always truthy
print('10', type('10'), ',', type(int('10')))
|
"""
# Definition for a Node.
class Node(object):
def __init__(self, val, children):
self.val = val
self.children = children
"""
"""
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
"""
class Codec:

    def encode(self, root):
        """Encodes an n-ary tree to a binary tree.

        Uses the left-child / right-sibling representation: a node's first
        n-ary child becomes the binary node's right child, and each further
        child hangs off the previous child's left pointer.

        :type root: Node
        :rtype: TreeNode
        """
        if root is None:
            return None
        encoded = TreeNode(root.val)
        children = root.children if root.children else []
        previous = None
        for child in children:
            encoded_child = self.encode(child)
            if previous is None:
                encoded.right = encoded_child  # first child
            else:
                previous.left = encoded_child  # next sibling
            previous = encoded_child
        return encoded

    def decode(self, data):
        """Decodes your binary tree to an n-ary tree.

        Mirrors encode(): the right child is the first n-ary child, and the
        chain of left pointers enumerates the remaining siblings.

        :type data: TreeNode
        :rtype: Node
        """
        if data is None:
            return None
        decoded = Node(data.val, [])
        sibling = data.right
        while sibling is not None:
            decoded.children.append(self.decode(sibling))
            sibling = sibling.left
        return decoded
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.decode(codec.encode(root))
|
from bs4 import BeautifulSoup as bs
import os
import threading
import requests
import random
host = "http://index-of.co.uk/"
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) \
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36"}
"""
with requests.Session() as s:
page = s.get("http://index-of.co.uk/ASP/", headers=headers)
soup = bs(str(page.content), "html.parser")
clean = soup.prettify("utf-8")
#with open(os.getcwd()+"/ASP.html", "r") as f:
# page = f.readlines()
#soup = bs(open(os.getcwd()+"/home.html", "r"), "html.parser")
#clean = soup.prettify("utf-8")
with open(os.getcwd()+"/ASP.html", "w") as f:
f.write(clean)
def link_is_file(link):
if link[:4] == "http":
return False
elif link[-1] == "/":
return False
elif not link[0].isupper():
return False
else:
return True
soup = bs(open(os.getcwd()+"/ASP.html", "r"), "html.parser")
a_tags = soup.find_all("a")
for a in a_tags:
link = a["href"]
if link_is_file(link):
print link
#soup = bs(str(a_tags), "html.parser")
#print(soup.a["href"])
"""
def main():
    """Entry point for the thread demo: run the workers, then report."""
    # one() blocks until all worker threads have joined; its return value
    # (None) was previously bound to an unused variable.
    one()
    print("Done at main")
def one():
    """Spawn three threads running two(3), wait for all of them to finish."""
    threads_list = [threading.Thread(target=two, args=(3,)) for _ in range(3)]
    for thread in threads_list:
        thread.start()
    for thread in threads_list:
        thread.join()
    # print() call syntax works on both Python 2 (single argument) and 3;
    # the original `print "Done"` statement was Python-2-only.
    print("Done")
def two(num):
    """Collect the integers 0..num-1 into a list and print it.

    Worker body for the threading demo; returns None.
    """
    vals = list(range(num))
    # print() call syntax is portable across Python 2 and 3.
    print(vals)
# Script entry point.
if __name__ == "__main__":
    main()
from bs4 import BeautifulSoup as bs
import os
import threading
import requests
host = "http://index-of.co.uk/"
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) \
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36"}
def main():
    """Fetch the directory listing from the site, then mirror it with threads."""
    site_dir_list = get_site_dir_list() # get the directory links from the site, in case there are new ones
    create_threads(site_dir_list)
def create_threads(dir_link):
    """Work through the pending directory list with batches of 10 threads.

    NOTE(review): the *dir_link* parameter is never used — the pending list
    is re-read from dirs.dmd instead; confirm whether the caller's argument
    was meant to be consumed here.
    """
    with open(os.getcwd()+"/retrieved_dir.dmd", "r") as f: # completed directories
        retrieved = f.readlines()
    with open(os.getcwd()+"/dirs.dmd", "r") as f: # all known directories
        dir_links = f.readlines()
    for link in retrieved: # drop any dir that has already been fully retrieved
        if link in dir_links:
            dir_links.remove(link)
    while dir_links != []:
        if not os.path.isfile(os.getcwd()+"/links.dmd"): # create the progress file on first run
            with open(os.getcwd()+"/links.dmd", "w") as f:
                pass
            retrieved_links = []
        else:
            with open(os.getcwd()+"/links.dmd", "r") as f: # load previously retrieved links
                retrieved_links = f.readlines()
        threads_list = []
        # Launch up to 10 workers, popping directories off the end of the list.
        # NOTE(review): get_links() is defined with a single parameter but is
        # invoked here with two arguments — every worker will raise TypeError.
        for _ in range(10):
            if dir_links != []:
                new_thread = threading.Thread(target=get_links, args=(dir_links[-1].strip("\n"), retrieved_links))
                threads_list.append(new_thread)
                print("trying to remove %s \n"%dir_links[-1])
                dir_links.remove(dir_links[-1])
                print("new length of list is %d \n"%len(dir_links))
            else:
                print("Exhausted directory links")
        if threads_list != []: # confirms that the threads list is not empty
            for thread in threads_list:
                thread.start()
            for thread in threads_list:
                thread.join()
        else:
            print("Exhausted Threads list")
        # NOTE(review): neither new_links nor retrieved_dirs is defined
        # anywhere in this function — both blocks below raise NameError.
        if new_links != []:
            with open(os.getcwd()+"/links.dmd", "a") as f: # append newly retrieved links
                for link in new_links:
                    f.write(link+"\n")
            print("wrote new links")
        if retrieved_dirs != []:
            with open(os.getcwd()+"/retrieved_dir.dmd", "a") as f:
                for dir in retrieved_dirs:
                    f.write(dir+"\n")
def get_links(dir_link):
    """Mirror every file of one remote directory into a local folder.

    Downloads the directory page, extracts its file links, and fetches each
    file that is not already present on disk.

    NOTE(review): create_threads() invokes this with two arguments; the
    signature here accepts only one — reconcile before running.
    """
    with requests.Session() as s:
        page = s.get(host+"/"+dir_link, headers=headers) # open the page of the current directory
    if not os.path.isdir(os.getcwd()+"/"+dir_link): # check if the directory has not been created
        os.mkdir(os.getcwd()+"/"+dir_link) # create the directory
    files_link = get_files_link(page.content) # extract the downloadable file links
    with requests.Session() as s:
        for link in files_link:
            file_name = validate_filename(link)
            if not os.path.isfile(os.getcwd()+"/"+dir_link+file_name): # skip files already on disk
                link = link.strip("\n")
                print("trying to retrieve %s from %s \n"%(file_name, dir_link))
                file_get = s.get(host+"/"+dir_link+link, headers=headers)
                with open(os.getcwd()+"/"+dir_link+file_name, "wb") as f:
                    f.write(file_get.content)
                print("Retrieved %s \n"%file_name)
def validate_filename(link):
    """Return *link* with URL-encoded spaces ('%20') replaced by underscores."""
    return link.replace("%20", "_")
def get_files_link(page):
    """Extract the href of every anchor on *page* that looks like a file.

    *page* is raw HTML (bytes or str); filtering is delegated to
    link_is_file().
    """
    links = []
    soup = bs(str(page), "html.parser")
    a_tags = soup.find_all("a")
    for a in a_tags:
        link = a["href"]
        if link_is_file(link):
            links.append(link)
    return links
def link_is_file(link):
    """Heuristic: a file entry is a non-absolute link without a trailing
    slash whose first character is uppercase (site listing convention)."""
    if link[:4] == "http" or link[-1] == "/":
        return False
    return link[0].isupper()
def get_dir_list():
    """Read the cached directory list from dirs.dmd in the working directory.

    Returns the raw lines, trailing newlines included.
    """
    dirs_path = os.getcwd() + "/dirs.dmd"
    with open(dirs_path, "r") as handle:
        return handle.readlines()
#this function retrieve the link of dirs from the site
# this function retrieves the directory links from the site's homepage
def get_site_dir_list():
    """Fetch the homepage and return the hrefs that look like directories.

    Filtering is delegated to is_link_dir(); requires network access.
    """
    links = []
    with requests.Session() as s:
        open_homepage = s.get(host, headers=headers) # fetch the homepage of the site
        homepage = open_homepage.content
    soup = bs(str(homepage), "html.parser")
    a_tags = soup.find_all("a")
    all_links = []
    for a in a_tags:
        link = a["href"] # extract the href attribute with BeautifulSoup
        all_links.append(link)
    for link in all_links:
        if is_link_dir(link):
            links.append(link)
    return links
# performs simple checks if don't get this too, this code is not for u
def is_link_dir(link):
    """Heuristic: a directory entry is a non-absolute link that ends with a
    slash and whose first character is uppercase (site listing convention)."""
    if link[:4] == "http" or link[-1] != "/":
        return False
    return link[0].isupper()
# Script entry point.
if __name__=="__main__":
    main()
"""
|
# Copyright (c) 2012-2021, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "AWS Secrets Manager"
prefix = "secretsmanager"
class Action(BaseAction):
    """An IAM action in the Secrets Manager service, prefixed automatically
    with "secretsmanager:"."""

    # NOTE: default of None with a plain `str` annotation mirrors the other
    # generated service modules; an Optional[str] would be more precise.
    def __init__(self, action: str = None) -> None:
        super().__init__(prefix, action)
class ARN(BaseARN):
    """An ARN for a Secrets Manager resource; region/account default to
    empty (wildcard-style) segments."""

    def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
        super().__init__(
            service=prefix, resource=resource, region=region, account=account
        )
# IAM actions of the Secrets Manager service, exposed as module-level
# constants so policies can reference e.g. secretsmanager:GetSecretValue.
CancelRotateSecret = Action("CancelRotateSecret")
CreateSecret = Action("CreateSecret")
DeleteResourcePolicy = Action("DeleteResourcePolicy")
DeleteSecret = Action("DeleteSecret")
DescribeSecret = Action("DescribeSecret")
GetRandomPassword = Action("GetRandomPassword")
GetResourcePolicy = Action("GetResourcePolicy")
GetSecretValue = Action("GetSecretValue")
ListSecretVersionIds = Action("ListSecretVersionIds")
ListSecrets = Action("ListSecrets")
PutResourcePolicy = Action("PutResourcePolicy")
PutSecretValue = Action("PutSecretValue")
RestoreSecret = Action("RestoreSecret")
RotateSecret = Action("RotateSecret")
TagResource = Action("TagResource")
UntagResource = Action("UntagResource")
UpdateSecret = Action("UpdateSecret")
UpdateSecretVersionStage = Action("UpdateSecretVersionStage")
ValidateResourcePolicy = Action("ValidateResourcePolicy")
|
from django.apps import AppConfig
class AzuremanagementConfig(AppConfig):
    """Django application configuration for the AzureManagement app."""
    name = 'AzureManagement'
|
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class MusicWorks(db.Model):
    """A musical work identified by its ISWC code."""
    __tablename__ = "music_works"

    # International Standard Musical Work Code — unique primary key.
    iswc = db.Column(db.String, unique=True, primary_key=True)
    title = db.Column(db.String)
    # NOTE: contributors and sources are stored as plain strings — presumably
    # delimiter-joined lists; confirm the expected format with the loaders.
    contributors = db.Column(db.String)
    sources = db.Column(db.String)
|
import yaml
import sys
def dereferenceAll(obj, parent):
    """Recursively resolve '$ref' and 'allOf' nodes of an OpenAPI-like tree.

    *parent* is the document root used to look up reference paths. Returns
    a structure with references merged in:
      - a dict containing '$ref' is merged with the referenced object and
        the '$ref' key is removed;
      - an 'allOf' dict is collapsed into a single object whose 'properties'
        are the union of all items' properties;
      - other dict values and list items are dereferenced recursively.

    NOTE: the function both mutates dicts in place (the else branch) and
    rebinds *obj* (the $ref/allOf branches), so callers must use the return
    value rather than rely on in-place modification.
    """
    if type(obj) is dict:
        for fieldStr in obj:
            #print(fieldStr)
            if(fieldStr == '$ref'):
                # '$ref' values are slash-separated paths from the root,
                # e.g. 'definitions/Foo'. Missing path parts are skipped.
                refPath = obj[fieldStr].split('/')
                refObj = parent
                for refPart in refPath:
                    if refPart in refObj:
                        refObj = refObj[refPart]
                refObj = dereferenceAll(refObj, parent)
                # Merge: referenced fields override same-named local ones.
                obj = {**obj, **refObj}
            elif(fieldStr == 'allOf'):
                # Collapse all items into one object; later items override
                # earlier ones. Any non-'properties' fields are discarded.
                comboObj = {'properties': {}}
                for item in obj[fieldStr]:
                    itemObj = dereferenceAll(item, parent)
                    comboObj['properties'] = {**(comboObj['properties']), **(itemObj['properties'])}
                obj = comboObj
            else:
                obj[fieldStr] = dereferenceAll(obj[fieldStr], parent)
        # Drop the now-resolved reference marker, if any survived the merge.
        if '$ref' in obj:
            obj.pop('$ref')
    elif type(obj) is list:
        newList = []
        for item in obj:
            newList.append(dereferenceAll(item, parent))
        obj = newList
    #print(obj)
    return obj
def dereferenceBrAPI(filePath='./brapi_openapi.yaml'):
    """Load the BrAPI OpenAPI YAML file and resolve all $ref/allOf references.

    Returns the dereferenced document as plain Python containers; on a YAML
    parse error the error is printed and an empty dict is dereferenced.
    """
    fileObj = {}
    print(filePath)
    with open(filePath, "r") as stream:
        try:
            # BUG FIX: yaml.load() without an explicit Loader is unsafe and
            # rejected by PyYAML >= 6; safe_load only constructs plain
            # Python objects, which is all this spec file needs.
            fileObj = yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            print(exc)
    fileObj = dereferenceAll(fileObj, fileObj)
    return fileObj
|
from enum import Enum
class ThresholdType(Enum):
    """Strategies for deriving a threshold from a set of values."""
    AVG = "avg"               # use the average value
    MAX_FRACT_2 = "maxdiv2"   # use half of the maximum value
|
# -*- coding:utf-8 -*-
# responses.py
import os, sys
import re
import mimetypes
from django.http import HttpResponseRedirect, HttpResponse, HttpResponseNotFound, \
HttpResponseBadRequest, HttpResponseNotAllowed, HttpResponseForbidden, \
HttpResponseServerError, Http404, \
StreamingHttpResponse, JsonResponse
from krust.file_utils import *
from krux.random import random_str
def file_response(fname, save_name=None, content_type=None, remove_on_finish=False):
    """Serve a local or s3://-style file as an attachment download.

    S3 paths are first copied to a /tmp scratch file, which is removed after
    the response body has been read. Returns HttpResponseNotFound when the
    file is missing or the S3 copy fails. The Content-Type is guessed from
    the filename unless given explicitly; set remove_on_finish to delete a
    local source file after serving.
    """
    del_tmp = False
    if re.match(r'^s3[c]?://.*', fname):
        try:
            tmpfile = '/tmp/tmp.%s%s' % (random_str(8), get_ext(fname))
            CP(fname, tmpfile)
            fname = tmpfile
            del_tmp = True
        except Exception:
            return HttpResponseNotFound()
    elif not os.path.exists(fname):
        return HttpResponseNotFound()
    if content_type is None:
        content_type, encoding = mimetypes.guess_type(fname)
    # BUG FIX: read via a context manager so the file handle is closed
    # deterministically (the original left it to the garbage collector).
    with open(fname, 'rb') as fh:
        response = HttpResponse(fh.read(), content_type)
    if save_name is None:
        response['Content-Disposition'] = 'attachment;'
    else:
        response['Content-Disposition'] = 'attachment; filename="%s"' % save_name
    # Clean up the S3 scratch copy and, if requested, the source file.
    if del_tmp and fname.startswith('/tmp/tmp.'):
        RM(fname)
    if remove_on_finish and os.path.isfile(fname):
        RM(fname)
    return response
def string_response(s, save_name=None, content_type=None):
    """Serve an in-memory string/bytes value as an attachment download.

    The Content-Type is guessed from *save_name* when not given. A short
    *content_type* (< 6 chars) is treated as a file extension — e.g. 'csv'
    or '.csv' — and converted via mimetypes; longer values are used as-is.
    """
    if save_name is None:
        save_name = 'tmpstr'
    if not content_type :
        if save_name:
            content_type, encoding = mimetypes.guess_type(save_name)
        else:
            content_type = 'application/octet-stream'
    else:
        # Extension shorthand: '.csv'/'csv' -> 'text/csv', etc.
        if len(content_type) < 6:
            if not content_type.startswith('.'):
                content_type = '.' + content_type
            content_type, encoding = mimetypes.guess_type(content_type)
    response = HttpResponse(s, content_type)
    response['Content-Disposition'] = 'attachment; filename="%s"' % save_name
    return response
class ExceptionJsonResponse(JsonResponse):
    """JSON response for client-side errors (HTTP 400 Bad Request)."""
    status_code = 400
class ErrorJsonResponse(JsonResponse):
    """JSON response for server-side errors (HTTP 500 Internal Server Error)."""
    status_code = 500
|
# MIT License
#
# Copyright (c) 2021 Clivern
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pytest
from pindo.runner import Runner
from pindo.runner import Runner
from pindo.runtime.docker.go import Go
from pindo.runtime.docker.java import Java
from pindo.runtime.docker.php import PHP
from pindo.runtime.docker.python import Python
from pindo.runtime.docker.ruby import Ruby
from pindo.runtime.docker.rust import Rust
from pindo.runtime.docker.engine import Engine
def test_docker_engine():
    """Each Runner factory should map to its matching Docker runtime class."""
    cases = [
        (Runner.ruby("~~", "3.0.0"), Ruby),
        (Runner.php("~~", "8.1"), PHP),
        (Runner.python("~~", "3.9"), Python),
        (Runner.go("~~", "1.17"), Go),
        (Runner.rust("~~", "1.57.0"), Rust),
        (Runner.java("~~", "17.0"), Java),
    ]
    for code, runtime_cls in cases:
        # `isinstance(...) == True` was redundant; assert the boolean directly.
        assert isinstance(Engine.get_runtime(code), runtime_cls)
|
# coding=utf-8
import logging
import imaplib
import os
import re
from . import app_settings
from .models import (
IP,
LocationLocal
)
from celery.task import (
PeriodicTask,
Task,
)
from datetime import timedelta
from django.conf import settings
from shared.utils import list_remove_duplicates
logger = logging.getLogger('ip_assembler')
class UpdateLocationsIfNecessaryTask(PeriodicTask):
    """
    Tasks that checks if at least settings.IP_ASSEMBLER_IP_CHANGED_THRESHOLD IPs have changed since last run.
    If so, it calls the UpdateHtaccessLocationsTask.
    Last changed dates is written in settings.IP_ASSEMBLER_IP_CHANGED_FILE.
    """
    run_every = timedelta(minutes=60)

    def run(self, **kwargs):
        """
        Compare the current IP count with the last persisted one and trigger
        UpdateHtaccessLocationsTask when the difference exceeds the threshold.
        """
        logger.info('UpdateLocationsIfNecessaryTask was called')
        # read last ip count; -1 is the "no previous count available" sentinel
        try:
            with open(app_settings.IP_ASSEMBLER_IP_CHANGED_FILE, 'r') as f:
                content_list = f.readlines()
                if len(content_list) == 0:
                    ip_count_old = -1
                else:
                    ip_count_old = int(content_list[0])
        except IOError:
            ip_count_old = -1
        logger.info('read IP count of %(count)d' % {'count': ip_count_old})
        # if IPs have significantly changed (or we never ran before), update the locations
        ip_count_now = IP.objects.count()
        # BUGFIX: the -1 sentinel lives in ip_count_old (set above when the
        # state file is missing/empty); a queryset count can never be -1, so
        # the original `ip_count_now == -1` test was dead code.
        if ip_count_old == -1 or ip_count_now > ip_count_old + app_settings.IP_ASSEMBLER_IP_CHANGED_THRESHOLD:
            logger.info('Checking IP counts, last: %(ip_count_old)d - now: %(ip_count_now)d' % {
                'ip_count_old': ip_count_old,
                'ip_count_now': ip_count_now
            })
            # call the updater task
            UpdateHtaccessLocationsTask().delay()
            # write the new count to the file ('w' truncates, so the previous
            # extra open()/close() wipe was redundant)
            try:
                with open(app_settings.IP_ASSEMBLER_IP_CHANGED_FILE, 'w') as f:
                    f.write(str(ip_count_now))
            except IOError:
                logger.exception('unable to write to file %(file_path)s' % {'file_path': app_settings.IP_ASSEMBLER_IP_CHANGED_FILE})
        else:
            logger.info('nothing to do here')
class UpdateHtaccessLocationsTask(Task):
    """
    Updates locations of .htaccess with new IPs.

    For every LocalLocation it splices a freshly generated deny-list (three
    SetEnvIF lines per IP) into the file, replacing the span between the first
    REMOTE_ADDR rule and the last X-CLUSTER-CLIENT-IP rule.
    """
    def run(self, **kwargs):
        logger.info('UpdateHtaccessLocationsTask was called')
        # the regex patterns - these match the deny rules previously written
        # by this task, and double as templates for the new rules below
        pattern0 = 'SetEnvIF REMOTE_ADDR ".*" DenyAccess'
        pattern1 = 'SetEnvIF X-FORWARDED-FOR ".*" DenyAccess'
        pattern2 = 'SetEnvIF X-CLUSTER-CLIENT-IP ".*" DenyAccess'
        for location in LocationLocal.objects.all():
            logger.info('Updating .htaccess file: %(location)s' % {'location': location.path})
            try:
                f = open(location.path, 'r')
                content_old = ''.join(f.readlines())
                f.close()
            except IOError:
                # NOTE(review): a single unreadable file aborts the whole run
                # (return, not continue) — confirm this is intended
                logger.exception('unable to read from file %(path)s' % {'path': location.path})
                return
            logger.info('read content of length %(length)d' % {'length': len(content_old)})
            # list of all positions of occurrences of the first/last rule kinds
            occurrences_r0 = [m.start(0) for m in re.finditer(pattern0, content_old)]
            occurrences_r2 = [m.start(0) for m in re.finditer(pattern2, content_old)]
            if len(occurrences_r0) == 0 or len(occurrences_r2) == 0:
                # no previous deny-list: prepend the new rules to the file
                start = 0
                end = 0
            else:
                # start index where the IPs are declared
                start = occurrences_r0[0]
                # end index of IPs
                # the occurrences_r2[-1] returns only the index of the last occurrence that has a dynamic length,
                # so we search for it and append its length to get the last character
                # (+1 presumably skips the trailing newline — TODO confirm)
                end = occurrences_r2[-1] + len(re.findall(pattern2, content_old)[-1]) + 1
            # contents before the IPs
            content_new = content_old[:start]
            # start writing new IPs, one rule per header variant per IP
            for ip in IP.objects.all().order_by('seg_0', 'seg_1', 'seg_2', 'seg_3'):
                # build the anchored IP regex; a '*' last segment becomes [0-9]+
                replacement = '^%(seg_0)s\.%(seg_1)s\.%(seg_2)s\.%(seg_3)s$' % {
                    'seg_0': ip.seg_0,
                    'seg_1': ip.seg_1,
                    'seg_2': ip.seg_2,
                    'seg_3': ip.seg_3 if ip.seg_3 != '*' else '[0-9]+',
                }
                content_new += str(pattern0.replace('.*', replacement)) + '\n'
                content_new += str(pattern1.replace('.*', replacement)) + '\n'
                content_new += str(pattern2.replace('.*', replacement)) + '\n'
            # contents after the IPs
            content_new += content_old[end:]
            # go to beginning of file and write
            logger.info('writing new content with length %(length)d' % {'length': len(content_new)})
            # try to chmod +w the file
            try:
                os.system('chmod +w %(path)s' % {'path': location.path})
            except OSError:
                logger.exception('unable to chmod the file on path %(path)s' % {'path': location.path})
            # write to the file
            try:
                f = open(location.path, 'w')
                f.write(content_new)
                f.close()
            except IOError:
                logger.exception('unable to write to file %(path)s' % {'path': location.path})
                return
            # remove write permissions
            try:
                os.system('chmod -w %(path)s' % {'path': location.path})
            except OSError:
                logger.exception('unable to chmod the file on path %(path)s' % {'path': location.path})
        logger.info('done')
class IPEMailChecker(PeriodicTask):
    """
    Periodic task checking the mailbox for new mails about WP spamming..
    """
    run_every = timedelta(minutes=60)

    def __init__(self):
        """
        Init method setting the regular expressions.
        """
        # BUGFIX: initialize the PeriodicTask base class as well; the original
        # override silently skipped it.
        super(IPEMailChecker, self).__init__()
        self.regex_expressions = [
            re.compile(".*ip_tracer/(.*)\).*", re.IGNORECASE | re.MULTILINE | re.UNICODE | re.VERBOSE),
            re.compile(".*IP address (.*) has been.*"),
            re.compile(".*Ein Host, (.*)\(.*")
        ]

    def run(self, **kwargs):
        """
        Checks the IMAP mailbox for new mails and tries to handle them.
        """
        try:
            # connect to server and login
            box = imaplib.IMAP4_SSL(settings.IMAP_SERVER)
            box.login(settings.IMAP_USERNAME, settings.IMAP_PASSWORD)
            box.select()
            # search for all mails in the mailbox
            result, mail_indices = box.search(None, 'ALL')
            # if everything was ok...
            if result == 'OK':
                # check number of mails
                mail_count = len(mail_indices[0].split())
                logger.info('found %(mail_count)d mails...' % {'mail_count': mail_count})
                # iterate the mail indices and fetch the mails
                ips_created = 0
                for mail_index in mail_indices[0].split():
                    logger.info('fetching mail %(mail_index)s...' % {'mail_index': int(mail_index)})
                    # mail data is a list with a tuple
                    sub_result, mail_data = box.fetch(mail_index, '(BODY[TEXT])')
                    if sub_result == 'OK':
                        # fetch the ips
                        ips = list_remove_duplicates(
                            self.find_ips(''.join([str(data) for data in mail_data[0]]))
                        )
                        # if ips found, add them and delete the mail
                        if len(ips) > 0:
                            logger.info('found %(count)d IPs' % {'count': len(ips)})
                            ips_created += IP.batch_add_ips(ips)
                            box.store(mail_index, '+FLAGS', '\\Deleted')
                    else:
                        # BUGFIX: mail_index is bytes; %d on bytes raised a
                        # TypeError — convert explicitly before formatting.
                        logger.error('fetching mail with index %(index)d failed' % {'index': int(mail_index)})
                # finally, if ips were added, unify the IPs
                if ips_created > 0:
                    logger.info('created %(count)d IPs' % {'count': ips_created})
                    IP.unify_ips()
            else:
                logger.error('search returned not OK')
            box.close()
            box.logout()
        except Exception:
            # narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed
            logger.exception('retrieving mail failed')

    def find_ips(self, text):
        """
        Uses some regex to find IPs within the text.
        :param text: the text to search in
        :type text: str
        :return: list of ips
        :rtype: list
        """
        for regex in self.regex_expressions:
            ips = regex.findall(text)
            if len(ips) > 0:
                return ips
        return []
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# filename: util.py
#
# 通用函数库
#
import hashlib
def to_bytes(data):
    """Coerce *data* to UTF-8 encoded bytes.

    bytes pass through unchanged; str/int/float are stringified then encoded.

    Raises:
        TypeError: for any other input type.
    """
    if isinstance(data, bytes):
        return data
    elif isinstance(data, (str, int, float)):
        return str(data).encode("utf-8")
    else:
        # give the caller a useful message instead of a bare TypeError
        raise TypeError("expected bytes, str, int or float, got %s" % type(data).__name__)


def MD5(data):
    """Return the hex MD5 digest of *data* (coerced via :func:`to_bytes`)."""
    # a proper def instead of a lambda bound to a name (PEP 8 E731)
    return hashlib.md5(to_bytes(data)).hexdigest()
|
import aoc
# read polymer and templates
# read polymer and templates
polymer, raw_templates = aoc.blocks("14_in.txt")
templates = dict(line.split(" -> ") for line in aoc.lines(raw_templates))
# do 10 iterations of inserting
for _ in range(10):
    # walk adjacent pairs, inserting the templated element between each pair
    grown = [polymer[0]]
    for left, right in zip(polymer, polymer[1:]):
        grown.append(templates[left + right])
        grown.append(right)
    polymer = "".join(grown)
# count letters
counts = {}
for char in polymer:
    counts[char] = counts.get(char, 0) + 1
# print result: most common element count minus least common
print(max(counts.values()) - min(counts.values()))
|
import asyncio
import pathlib
from fastapi.testclient import TestClient
from mainpy import main
from jax_pep503.main import app, get_package_links
HTML_DIR = pathlib.Path('./docs')
client = TestClient(app)
def build_index():
    """Synchronous entry point: run the async static-site build to completion."""
    asyncio.run(_build_index())
@main
async def _build_index(html_dir: pathlib.Path = HTML_DIR):
    """Render the package index and every per-package page to static HTML.

    Crawls '/' plus one path per package link via the FastAPI test client and
    writes each response body under ``html_dir``, mirroring the URL layout
    (directory-style URLs get an ``index.html``).

    NOTE(review): also decorated with ``mainpy.main`` — presumably this makes
    the coroutine a script entry point on import; confirm against mainpy docs.
    """
    html_dir.mkdir(exist_ok=True)
    url_paths = ['/']
    package_links = await get_package_links()
    for package in package_links:
        url_paths.append(f'/{package}/')
    for url_path in url_paths:
        response = client.get(url_path)
        # fail fast on any non-2xx page
        response.raise_for_status()
        html_path = html_dir / url_path.removeprefix('/')
        if not url_path.endswith('.html'):
            html_path.mkdir(parents=True, exist_ok=True)
            html_path /= 'index.html'
        print(f'{url_path} -> {html_path}')
        html_path.write_bytes(response.content)
|
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
class form_cadastro_livro(FlaskForm):
    """Book registration form.

    NOTE(review): numeric fields (anoPublicacao, estoque, preco) are plain
    StringFields with no validators — conversion/validation presumably
    happens elsewhere; confirm.
    """
    titulo = StringField('Título')
    isbn = StringField('ISBN')
    autor = StringField('Autor')
    anoPublicacao = StringField('Ano publicação')
    estoque = StringField('Estoque')
    preco = StringField('Preço')
    cadastrar = SubmitField('Cadastrar')
|
import sys
import signal
import cv2
from contextlib import contextmanager
class Color: # pylint: disable=W0232
    """ANSI SGR foreground color codes (30-38), consumed by colorize()."""
    GRAY = 30
    RED = 31
    GREEN = 32
    YELLOW = 33
    BLUE = 34
    MAGENTA = 35
    CYAN = 36
    WHITE = 37
    CRIMSON = 38
def colorize(num, string, bold=False, highlight=False):
    """Wrap *string* in an ANSI SGR escape sequence for color code *num*.

    ``highlight`` shifts the code by 10 (foreground -> background variant);
    ``bold`` appends the bold attribute.
    """
    assert isinstance(num, int)
    code = num + 10 if highlight else num
    attrs = [str(code)]
    if bold:
        attrs.append('1')
    return '\x1b[{}m{}\x1b[0m'.format(';'.join(attrs), string)
def colorprint(colorcode, text, o=sys.stdout, bold=False):
    """Write *text* colorized with *colorcode* to stream *o* (no newline)."""
    o.write(colorize(colorcode, text, bold=bold))
def warn(msg):
    """Print *msg* in yellow to stdout."""
    print(colorize(Color.YELLOW, msg))
def error(msg):
    """Print *msg* in red to stdout."""
    print(colorize(Color.RED, msg))
# http://stackoverflow.com/questions/366682/how-to-limit-execution-time-of-a-function-call-in-python
# Raised by time_limit() when the wrapped code exceeds its wall-clock budget.
class TimeoutException(Exception): pass
@contextmanager
def time_limit(seconds):
    """Run the managed block under a wall-clock limit enforced via SIGALRM.

    Raises:
        TimeoutException: if the block runs longer than *seconds* seconds.

    Only usable in the main thread on Unix (signal.alarm limitation).
    """
    def signal_handler(signum, frame):
        raise TimeoutException(colorize(Color.RED, " *** Timed out!", highlight=True))
    # BUGFIX: keep the previously installed handler and restore it afterwards,
    # so nesting / outer SIGALRM users are not permanently clobbered.
    previous_handler = signal.signal(signal.SIGALRM, signal_handler)
    signal.alarm(seconds)
    try:
        yield
    finally:
        signal.alarm(0)
        signal.signal(signal.SIGALRM, previous_handler)
def img_debug(img, message="debug"):
    """Show *img* in an OpenCV window titled *message* and block until a key is pressed."""
    cv2.imshow(message, img)
    cv2.waitKey(0)
|
#!/usr/bin/env python3
from transport.transport_tcp import TransportTCP
from transport.transport_serial import TransportSerial
from protocol.communicator import Communicator
from protocol.defs.defs2x6.defs2x6 import *
from protocol.wrapper import *
import time
import json
from json import JSONEncoder
# Monkeypatch json.JSONEncoder so that json.dumps() falls back to an object's
# __json__() classmethod/method when it exists, instead of raising TypeError.
def _default(self, obj):
    # look up __json__ on the class; fall back to the original encoder default
    return getattr(obj.__class__, "__json__", _default.default)(obj)
_default.default = JSONEncoder().default  # keep a reference to the original
JSONEncoder.default = _default  # install globally for every JSONEncoder
#######################################
def selftest_update(comm):
    """Verify that update_recursive() round-trips every parameter group.

    Reads all parameter groups from the device, runs an update pass on each
    decoded group, and reports any group whose serialized bytes changed.

    Returns:
        bool: True when every group re-serialized unchanged, else False.
    """
    requests = parameter_requests
    commands = [g.command for g in requests]
    response = comm.readRegisterBulk(commands)
    # decode each raw register response with its matching request class
    results = [request_cls(raw) for request_cls, raw in zip(requests, response)]
    for r in results:
        orig = r.toBytes()
        r.update_recursive()
        new = r.toBytes()
        if orig != new:
            print(f"\nERROR: Following class is defective:\n {r}")
            print(f"BEFORE: {orig.hex()}")
            print(f" AFTER: {new.hex()}")
            return False
    return True
def backup_all_paramters(comm):
    """Dump every parameter group to backup.json (raw hex plus decoded values).

    Note: function name (with its typo) is kept for backward compatibility.
    """
    requests = parameter_requests
    commands = [g.command for g in requests]
    response = comm.readRegisterBulk(commands)
    # decode each raw register response with its matching request class
    results = [request_cls(raw) for request_cls, raw in zip(requests, response)]
    backup = []
    for r in results:
        values = {}
        for k, v in r.values.items():
            try:
                values[k] = str(v.value)
            except Exception:
                # plain (non-wrapped) values have no .value attribute;
                # narrowed from a bare `except:` that also swallowed
                # KeyboardInterrupt/SystemExit
                values[k] = str(v)
        backup.append({"raw": r.toBytes().hex(), "values": values})
    with open("backup.json", 'w') as outfile:
        # BUGFIX: json.dump returns None, so the old print() only emitted
        # "None" — just write the file.
        json.dump(backup, outfile, indent=4, sort_keys=True)
def selftest_write(comm):
    """End-to-end write test for the P01 room temperature parameter.

    Reads the P01..P12 group, writes 19 into p01RoomTempDay, verifies the
    device accepted it, then restores the original value and verifies the
    restore too.

    Returns:
        bool: True when both write and restore round-tripped; False otherwise.
        NOTE(review): falls off the end (returns None) when the first write
        response is None — callers should treat non-True as failure.
    """
    p01p12 = pP01P12Group(comm.readRegister(pP01P12Group.command))
    print("\nRead Values:\n")
    print(p01p12)
    p01 = p01p12.values["p01RoomTempDay"]
    p01_backup = p01.value.value  # remember original so we can restore it
    p01.value.value = 19
    p01p12.update_recursive()
    # give the device time to settle between register operations
    time.sleep(5)
    new = comm.writeRegister(pP01P12Group.command, p01p12.toBytes())
    if new is not None:
        p01p12 = pP01P12Group(new)
        print("\nNew Values:\n")
        print(p01p12)
        p01 = p01p12.values["p01RoomTempDay"]
        if p01.value.value == 19:
            # write confirmed; now restore the saved value
            p01 = p01p12.values["p01RoomTempDay"]
            p01.value.value = p01_backup
            p01p12.update_recursive()
            print(f"\nWrite successful, restoring old value: {p01_backup}\n")
            time.sleep(5)
            new2 = comm.writeRegister(pP01P12Group.command, p01p12.toBytes())
            if new2 is not None:
                p01p12 = pP01P12Group(new2)
                print("\nRestored Values:\n")
                print(p01p12)
                p01 = p01p12.values["p01RoomTempDay"]
                if p01.value.value == p01_backup:
                    print("\nOld Value restored, write works as expected!\n")
                    return True
    return False
def main():
    """Connect to the heat pump over TCP, dump all parameter groups, disconnect.

    The commented-out lines below are developer toggles for the other
    diagnostic routines (status dump, backup, write self-test, single
    parameter get/set) — left in place intentionally.
    """
    trans = TransportTCP("192.168.178.201", 7777)
    #trans = TransportSerial("/dev/ttyUSB0", 9600)
    comm = Communicator(trans)
    comm.start()
    w = Wrapper(comm)
    #printStatus(comm)
    #printRequests(comm, status_requests + parameter_requests)
    #backup_all_paramters(comm)
    #selftest_write(comm)
    #print(w.getSingleParameter(p01RoomTempDay))
    #status = w.getBulkStatus([sDhwTemp, sFlowTempHC1, sReturnTemp, sHeatingCircuitPump, sHeatRequest, sHcStage, sDhwStage])
    #for k,v in status.items():
    #print(v)
    #print(w.setSingleParameter(p42Fanstage3AirflowOutlet, 170))
    #time.sleep(1)
    groups = w.getBulkGroups(PARAM_GROUPS)
    #groups = w.getBulkGroups(STATUS_GROUPS)
    for k,v in groups.items():
        print(v)
    #w.setSingleParameter(p01RoomTempDay, 22.0)
    #print(json.dumps(groups, indent=4))
    #print(w.getSingleGroup(sControlGroup))
    comm.stop()
if __name__== "__main__":
    main()
|
from py42.exceptions import Py42BadRequestError
from py42.exceptions import Py42CloudAliasLimitExceededError
from py42.services import BaseService
class DetectionListUserService(BaseService):
    """Administrator utility to manage High Risk employees information.
    `Support Documentation <https://support.code42.com/Administrator/Cloud/Monitoring_and_managing/Detection_list_management_APIs>`__
    """
    _resource = u"v2/user"
    def __init__(self, connection, user_context, user_service):
        super(DetectionListUserService, self).__init__(connection)
        self._user_context = user_context
        self._user_service = user_service
    def _make_uri(self, action):
        # builds e.g. "v2/user/create" from the shared resource prefix
        return u"{}{}".format(self._resource, action)
    def create(self, username):
        """Create a detection list profile for a user.
        Args:
            username (str): Username of the user.
        Returns:
            :class:`py42.response.Py42Response`
        """
        data = {
            u"tenantId": self._user_context.get_current_tenant_id(),
            u"userName": username,
            u"notes": "",
            u"riskFactors": [],
            u"cloudUsernames": [],
        }
        uri = self._make_uri(u"/create")
        return self._connection.post(uri, json=data)
    def get_by_id(self, user_id):
        """Get user details by user UID.
        Args:
            user_id (str or int): UID of the user.
        Returns:
            :class:`py42.response.Py42Response`
        """
        data = {
            u"tenantId": self._user_context.get_current_tenant_id(),
            u"userId": user_id,
        }
        uri = self._make_uri(u"/getbyid")
        return self._connection.post(uri, json=data)
    def get(self, username):
        """Get user details by username.
        Args:
            username (str): Username of the user.
        Returns:
            :class:`py42.response.Py42Response`
        """
        data = {
            u"tenantId": self._user_context.get_current_tenant_id(),
            u"username": username,
        }
        uri = self._make_uri(u"/getbyusername")
        return self._connection.post(uri, json=data)
    def update_notes(self, user_id, notes):
        """Add or update notes related to the user.
        Args:
            user_id (str or int): The user_id whose notes need to be updated.
            notes (str): User profile notes.
        Returns:
            :class:`py42.response.Py42Response`
        """
        data = {
            u"tenantId": self._user_context.get_current_tenant_id(),
            u"userId": user_id,
            u"notes": notes,
        }
        uri = self._make_uri(u"/updatenotes")
        return self._connection.post(uri, json=data)
    def add_risk_tags(self, user_id, tags):
        """Add one or more tags.
        Args:
            user_id (str or int): The user_id whose tag(s) needs to be updated.
            tags (str or list of str ): A single tag or multiple tags in a list to be added.
                e.g "tag1" or ["tag1", "tag2"]
        Returns:
            :class:`py42.response.Py42Response`
        """
        # normalize a single tag to a one-element list
        if not isinstance(tags, (list, tuple)):
            tags = [tags]
        data = {
            u"tenantId": self._user_context.get_current_tenant_id(),
            u"userId": user_id,
            u"riskFactors": tags,
        }
        uri = self._make_uri(u"/addriskfactors")
        return self._connection.post(uri, json=data)
    def remove_risk_tags(self, user_id, tags):
        """Remove one or more tags.
        Args:
            user_id (str or int): The user_id whose tag(s) needs to be removed.
            tags (str or list of str ): A single tag or multiple tags in a list to be removed.
                e.g "tag1" or ["tag1", "tag2"].
        Returns:
            :class:`py42.response.Py42Response`
        """
        # normalize a single tag to a one-element list
        if not isinstance(tags, (list, tuple)):
            tags = [tags]
        data = {
            u"tenantId": self._user_context.get_current_tenant_id(),
            u"userId": user_id,
            u"riskFactors": tags,
        }
        uri = self._make_uri(u"/removeriskfactors")
        return self._connection.post(uri, json=data)
    def add_cloud_alias(self, user_id, alias):
        """Add a cloud alias.
        Args:
            user_id (str or int): The user_id whose alias needs to be updated.
            alias (str): An alias to be added.
        Returns:
            :class:`py42.response.Py42Response`
        Raises:
            :class:`py42.exceptions.Py42CloudAliasLimitExceededError`: when the
                server rejects the alias because the per-user limit is reached.
        """
        data = {
            u"tenantId": self._user_context.get_current_tenant_id(),
            u"userId": user_id,
            u"cloudUsernames": [alias],
        }
        uri = self._make_uri(u"/addcloudusernames")
        try:
            return self._connection.post(uri, json=data)
        except Py42BadRequestError as err:
            # translate the server's limit error into a dedicated exception type
            if "Cloud usernames must be less than or equal to" in err.response.text:
                raise Py42CloudAliasLimitExceededError(err)
            raise err
    def remove_cloud_alias(self, user_id, alias):
        """Remove one or more cloud alias.
        Args:
            user_id (str or int): The user_id whose alias needs to be removed.
            alias (str): An alias to be removed.
        Returns:
            :class:`py42.response.Py42Response`
        """
        data = {
            u"tenantId": self._user_context.get_current_tenant_id(),
            u"userId": user_id,
            u"cloudUsernames": [alias],
        }
        uri = self._make_uri(u"/removecloudusernames")
        return self._connection.post(uri, json=data)
    def refresh(self, user_id):
        """Refresh SCIM attributes of a user.
        Args:
            user_id (str or int): The user_id of the user whose attributes need to be refreshed.
        Returns:
            :class:`py42.response.Py42Response`
        """
        data = {
            u"tenantId": self._user_context.get_current_tenant_id(),
            u"userId": user_id,
        }
        uri = self._make_uri(u"/refresh")
        return self._connection.post(uri, json=data)
|
import os
from ingest.process.detection.src.preprocess import pad_image
from PIL import Image
import glob
import click
from ingest.process.proposals.connected_components import get_proposals
import shutil
from joblib import Parallel, delayed
import shutil
def process_f(f, pth):
    """Generate proposals for one page image, archive the original, pad in place.

    Writes one CSV of x1,y1,x2,y2 proposal boxes under <pth>/proposals/,
    copies the unpadded image to <pth>/images_before_preprocess/, then
    overwrites the original file with its padded version.

    Args:
        f: path to a PNG page image.
        pth: dataset root containing the 'proposals' and
            'images_before_preprocess' directories.
    """
    basename = os.path.basename(f)[:-4]  # strip the '.png' extension
    img = Image.open(f)
    proposals = get_proposals(img)
    with open(os.path.join(pth, 'proposals', f'{basename}.csv'), 'w') as wf:
        for proposal in proposals:
            x1, y1, x2, y2 = proposal
            wf.write(f'{x1},{y1},{x2},{y2}\n')
    basename = os.path.basename(f)
    # keep a pristine copy before padding mutates the file on disk
    img.save(os.path.join(pth, 'images_before_preprocess', basename))
    pimg = pad_image(img)
    pimg.save(f)
def write_proposals(pth):
    """Regenerate proposals for every PNG in <pth>/images, in parallel.

    Any previous run is rolled back first: old proposals are wiped and the
    archived originals (images_before_preprocess) replace the padded images.

    Args:
        pth: dataset root directory.

    Environment:
        NUM_PROCESSES: required; worker count for joblib.
    """
    if os.path.exists(os.path.join(pth, 'proposals')):
        print('Found existing proposals. Wiping')
        shutil.rmtree(os.path.join(pth, 'proposals'))
    if os.path.exists(os.path.join(pth, 'images_before_preprocess')):
        # restore the unpadded originals so padding is not applied twice
        print('Original images found. Overwriting images dir')
        shutil.rmtree(os.path.join(pth, 'images'))
        shutil.move(os.path.join(pth, 'images_before_preprocess'), os.path.join(pth, 'images'))
    os.makedirs(os.path.join(pth, 'proposals'))
    os.makedirs(os.path.join(pth, 'images_before_preprocess'))
    # NOTE(review): raises KeyError when NUM_PROCESSES is unset — confirm intended
    num_processes = int(os.environ['NUM_PROCESSES'])
    Parallel(n_jobs=num_processes)(delayed(process_f)(f, pth) for f in glob.glob(os.path.join(pth, 'images', '*.png')))
@click.command()
@click.option('--train-path', type=str, help='Path to training data')
# BUGFIX: help text was copy-pasted from --train-path
@click.option('--val-path', type=str, help='Path to validation data')
def run(train_path, val_path):
    """Generate connected-component proposals for both dataset splits."""
    write_proposals(train_path)
    write_proposals(val_path)
if __name__ == '__main__':
    run()
|
# -*- coding: utf-8 -*-
# Copyright .
# Author:
#
# cython: language_level=3
#
import copy
import json
import six
import math
import torch
from lib.handler import load_bert, get_vocab_size
import lib.config as cf
from lib.utils import reshape_tensor, mask, find_max_proper_batch, gelu
from torch.nn import functional
from my_py_toolkit.file.file_toolkit import readjson
class BertConfig(object):
    """Configuration for `BertModel`."""
    def __init__(self,
                 vocab_size,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=16,
                 initializer_range=0.02):
        """Constructs BertConfig.
        Args:
          vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.
          hidden_size: Size of the encoder layers and the pooler layer.
          num_hidden_layers: Number of hidden layers in the Transformer encoder.
          num_attention_heads: Number of attention heads for each attention layer in
            the Transformer encoder.
          intermediate_size: The size of the "intermediate" (i.e., feed-forward)
            layer in the Transformer encoder.
          hidden_act: The non-linear activation function (function or string) in the
            encoder and pooler.
          hidden_dropout_prob: The dropout probability for all fully connected
            layers in the embeddings, encoder, and pooler.
          attention_probs_dropout_prob: The dropout ratio for the attention
            probabilities.
          max_position_embeddings: The maximum sequence length that this model might
            ever be used with. Typically set this to something large just in case
            (e.g., 512 or 1024 or 2048).
          type_vocab_size: The vocabulary size of the `token_type_ids` passed into
            `BertModel`.
          initializer_range: The stdev of the truncated_normal_initializer for
            initializing all weight matrices.
        """
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        # Use cls (not the hard-coded class name) so subclasses construct
        # instances of themselves; plain .items() replaces six.iteritems in
        # this Python-3-only file.
        config = cls(vocab_size=None)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config
    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        return cls.from_dict(readjson(json_file))
    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output
    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class LocalLinear(torch.nn.Module):
    r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
    Args:
        in_features: size of each input sample
        out_features: size of each output sample
        bias: If set to ``False``, the layer will not learn an additive bias.
            Default: ``False`` (note: unlike ``torch.nn.Linear``, whose
            default is ``True``)
    Shape:
        - Input: :math:`(N, *, H_{in})` where :math:`*` means any number of
          additional dimensions and :math:`H_{in} = \text{in\_features}`
        - Output: :math:`(N, *, H_{out})` where all but the last dimension
          are the same shape as the input and :math:`H_{out} = \text{out\_features}`.
    Attributes:
        weight: the learnable weights of the module of shape
            :math:`(\text{out\_features}, \text{in\_features})`, initialized
            with Kaiming-normal (fan_out) in :meth:`reset_parameters`.
        bias: the learnable bias of the module of shape :math:`(\text{out\_features})`.
            If :attr:`bias` is ``True``, the values are initialized from
            :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
            :math:`k = \frac{1}{\text{in\_features}}`
    Examples::
        >>> m = LocalLinear(20, 30)
        >>> input = torch.randn(128, 20)
        >>> output = m(input)
        >>> print(output.size())
        torch.Size([128, 30])
    """
    __constants__ = ['bias', 'in_features', 'out_features']
    def __init__(self, in_features, out_features, bias=False):
        super(LocalLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = torch.nn.Parameter(torch.Tensor(out_features, in_features))
        if bias:
            self.bias = torch.nn.Parameter(torch.Tensor(out_features))
        else:
            # registering None keeps state_dict / repr consistent without a bias
            self.register_parameter('bias', None)
        self.reset_parameters()
    def reset_parameters(self):
        """(Re)initialize weight (Kaiming-normal, fan_out) and bias (uniform)."""
        torch.nn.init.kaiming_normal_(self.weight, mode='fan_out')
        if self.bias is not None:
            # torch.nn.init.kaiming_normal_(self.bias, mode='fan_out')
            # same bound torch.nn.Linear uses: U(-1/sqrt(fan_in), 1/sqrt(fan_in))
            fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            torch.nn.init.uniform_(self.bias, -bound, bound)
    def forward(self, input):
        """Apply y = x @ weight.T + bias."""
        return torch.nn.functional.linear(input, self.weight, self.bias)
    def extra_repr(self):
        return 'in_features={}, out_features={}, bias={}'.format(
            self.in_features, self.out_features, self.bias is not None
        )
class Attention(torch.nn.Module):
    """
    Multi-head scaled dot-product attention.

    NOTE(review): the attention mask is applied *multiplicatively* to the
    scores before softmax (zeroing masked scores) rather than additively with
    -inf; masked positions can therefore still receive softmax weight —
    confirm this is intended.
    """
    def __init__(self, dim, attention_head_num, attention_probs_dropout_prob,
                 use_bias=False):
        super(Attention, self).__init__()
        self.dim = dim
        self.attention_head_num = attention_head_num
        self.use_bias = use_bias
        # defined but currently unused in forward (dropout call is commented out)
        self.dropout = torch.nn.Dropout(attention_probs_dropout_prob)
        if not self.dim % self.attention_head_num == 0:
            raise Exception(f"The dim({self.dim}) % attention_head_num({self.attention_head_num}) != 0")
        self.size_per_head = int(self.dim / self.attention_head_num)
        self.query_layer = torch.nn.Linear(self.dim, self.dim, self.use_bias)
        self.key_layer = torch.nn.Linear(self.dim, self.dim, self.use_bias)
        self.value_layer = torch.nn.Linear(self.dim, self.dim, self.use_bias)
        self.softmax = torch.nn.Softmax(dim=-1)
    def transpose4score(self, tensor, shape):
        """
        Reshape and permute *tensor* so heads become a batch dimension
        for the score computation.
        Args:
            tensor: projected Q or K of shape (batch, length, dim), flattened.
            shape: target 4-D shape (batch, length, heads, size_per_head).
        Returns:
            Tensor of shape (batch, heads, length, size_per_head).
        """
        tensor = reshape_tensor(tensor, shape)
        tensor = tensor.permute(0, 2, 1, 3)
        return tensor
    def forward(self, query_tensor, value_tensor, attention_mask=None):
        """Compute multi-head attention of query over value (self-attention when equal)."""
        batch_size, quert_length, _ = query_tensor.shape
        _, value_length, _ = value_tensor.shape
        query_tensor = reshape_tensor(query_tensor, (-1, self.dim))
        value_tensor = reshape_tensor(value_tensor, (-1, self.dim))
        query_tensor = self.query_layer(query_tensor)
        key_tensor = self.key_layer(value_tensor)
        value_tensor = self.value_layer(value_tensor)
        query_tensor = self.transpose4score(query_tensor, (batch_size, quert_length,
                                                           self.attention_head_num,
                                                           self.size_per_head))
        key_tensor = self.transpose4score(key_tensor, (batch_size, value_length,
                                                       self.attention_head_num,
                                                       self.size_per_head))
        attention_scores = torch.matmul(query_tensor, key_tensor.permute(0, 1, 3, 2))
        # batch_size, attention_head_num, query_length, value_length
        attention_scores = attention_scores / math.sqrt(float(self.size_per_head))
        if attention_mask is not None:
            # batch_size, 1, sqe_len
            attention_mask = torch.unsqueeze(attention_mask, 1)
            # batch_size, 1, sqe_len, 1
            attention_mask = torch.unsqueeze(attention_mask, -1)
            # batch_size, attention_head_num, squ_len
            attention_mask = attention_mask.expand(batch_size, self.attention_head_num, quert_length, value_length)
            # multiplicative masking (see class NOTE above)
            attention_scores = attention_scores * attention_mask
        attention_scores = self.softmax(attention_scores)
        # attention_scores = self.dropout(attention_scores)
        value_tensor = reshape_tensor(value_tensor, (batch_size, value_length,
                                                     self.attention_head_num, self.size_per_head))
        value_tensor = value_tensor.permute(0, 2, 1, 3)
        attention = torch.matmul(attention_scores, value_tensor)
        # batch_size, attention_head_num, query_length, size_per_head
        # attention = torch.matmul(attention_mask, value_tensor)
        attention = attention.permute(0, 2, 1, 3)
        attention = reshape_tensor(attention, (batch_size, quert_length, self.dim))
        return attention
class LocalBert(torch.nn.Module):
    """Minimal BERT-style embedding module: word embeddings plus optional
    segment (token-type) embeddings, sized from a JSON config file."""
    def __init__(self, bert_config, use_segments_embedding=False):
        super(LocalBert, self).__init__()
        self.bert_config = readjson(bert_config)
        self.vocab_size = self.bert_config.get("vocab_size")
        self.type_vocab_size = self.bert_config.get("type_vocab_size")
        self.embedding_dim = self.bert_config.get("hidden_size")
        self.use_segments_embedding = use_segments_embedding
        self.word_embeddings = torch.nn.Parameter(torch.Tensor(self.vocab_size, self.embedding_dim))
        if self.use_segments_embedding:
            self.segments_embedding = torch.nn.Parameter(torch.Tensor(self.type_vocab_size, self.embedding_dim))
        self.init_para()
    def init_para(self):
        """Kaiming-initialize the embedding tables."""
        torch.nn.init.kaiming_normal_(self.word_embeddings, mode='fan_out')
        # BUGFIX: segments_embedding only exists when use_segments_embedding is
        # True; the unconditional init raised AttributeError otherwise.
        if self.use_segments_embedding:
            torch.nn.init.kaiming_normal_(self.segments_embedding, mode='fan_out')
    def forward(self, input_ids, segment_ids):
        """Look up (and optionally sum) embeddings for a batch of id tensors.

        Returns:
            tuple: (embeddings of shape (batch, seq, dim), None) — the None
            mirrors the (sequence_output, pooled_output) BERT interface.
        """
        batch_size, sqe_length = input_ids.shape
        input_ids = reshape_tensor(input_ids, [-1])
        segment_ids = reshape_tensor(segment_ids, [-1])
        word_embedding = self.word_embeddings[input_ids]
        # BUGFIX: only add segment embeddings when the table was created.
        if self.use_segments_embedding:
            segment_embedding = self.segments_embedding[segment_ids]
            word_embedding = word_embedding + segment_embedding
        word_embedding = reshape_tensor(word_embedding, [batch_size, sqe_length, -1])
        return (word_embedding, None)
class DepthwiseSeparableConv(torch.nn.Module):
    """Depthwise separable convolution: a per-channel (grouped) convolution
    followed by a 1x1 pointwise convolution, for 1D or 2D inputs.

    Args:
        in_ch: input channel count.
        out_ch: output channel count (produced by the pointwise conv).
        k: depthwise kernel size ('same' padding via k // 2).
        dim: 1 for Conv1d, 2 for Conv2d.
        bias: whether both convolutions carry a bias term.
    """
    def __init__(self, in_ch, out_ch, k, dim=1, bias=True):
        super().__init__()
        if dim == 1:
            self.depthwise_conv = torch.nn.Conv1d(in_channels=in_ch, out_channels=in_ch, kernel_size=k, groups=in_ch,
                                                  padding=k // 2, bias=bias)
            self.pointwise_conv = torch.nn.Conv1d(in_channels=in_ch, out_channels=out_ch, kernel_size=1, padding=0, bias=bias)
        elif dim == 2:
            self.depthwise_conv = torch.nn.Conv2d(in_channels=in_ch, out_channels=in_ch, kernel_size=k, groups=in_ch,
                                                  padding=k // 2, bias=bias)
            self.pointwise_conv = torch.nn.Conv2d(in_channels=in_ch, out_channels=out_ch, kernel_size=1, padding=0, bias=bias)
        else:
            raise Exception("Wrong dimension for Depthwise Separable Convolution!")
        torch.nn.init.kaiming_normal_(self.depthwise_conv.weight)
        # BUGFIX: the second init targeted depthwise_conv.weight twice, leaving
        # the pointwise weights at their default initialization.
        torch.nn.init.kaiming_normal_(self.pointwise_conv.weight)
        if bias:
            # BUGFIX: with bias=False the .bias attributes are None and
            # constant_() would raise — only zero them when they exist.
            torch.nn.init.constant_(self.depthwise_conv.bias, 0.0)
            torch.nn.init.constant_(self.pointwise_conv.bias, 0.0)
    def forward(self, x):
        """Apply depthwise then pointwise convolution."""
        return self.pointwise_conv(self.depthwise_conv(x))
class ModelBaseLine(torch.nn.Module):
"""
The model of baseline.
"""
def __init__(self, bert_path, device, dropout, use_position_embedding=True,
max_postion=cf.max_postion, pos_dim=cf.bert_dim,
encoder_hidden_layers=cf.encoder_hidden_layers,
encoder_intermediate_dim=cf.encoder_intermediate_dim,
encoder_dropout_prob=cf.encoder_dropout_prob,
attention_head_num=cf.num_heads,
attention_probs_dropout_prob=cf.attention_probs_dropout_prob,
attention_use_bias=cf.attention_use_bias,
training=True,
use_pretrained_bert=cf.use_pretrained_bert,
use_conv=False,
chan_in=cf.chan_in,
chan_out=cf.chan_out,
kernel=cf.kernel):
""""""
super(ModelBaseLine, self).__init__()
self.training = training
# embedding
self.dim = pos_dim
self.bert = load_bert(bert_path, device, use_pretrained_bert, cf.bert_config, cf.use_segment_embedding, LocalBert)
self.dropout = torch.nn.Dropout(dropout)
self.layer_normal = torch.nn.LayerNorm([max_postion, pos_dim])
self.use_position_embedding = use_position_embedding
self.encoder_hidden_layers = encoder_hidden_layers
if self.use_position_embedding:
self.init_positon_embedding(max_postion, pos_dim)
# conv
self.use_conv = use_conv
self.conv = DepthwiseSeparableConv(chan_in, chan_out, kernel, dim=2)
# encoder
self.attention_layer = torch.nn.ModuleList([
Attention(pos_dim, attention_head_num, attention_probs_dropout_prob, attention_use_bias)
for i in range(self.encoder_hidden_layers)
])
self.encoder_dropout_prob = encoder_dropout_prob
self.encoder_linear_1 = torch.nn.ModuleList([torch.nn.Linear(self.dim, self.dim)
for i in range(self.encoder_hidden_layers)])
self.encoder_line_intermidia = torch.nn.ModuleList([torch.nn.Linear(self.dim, encoder_intermediate_dim)
for i in range(self.encoder_hidden_layers)])
self.encoder_line_2 = torch.nn.ModuleList([torch.nn.Linear(encoder_intermediate_dim, self.dim)
for i in range(self.encoder_hidden_layers)])
self.encoder_normal = torch.nn.ModuleList([torch.nn.LayerNorm([max_postion, pos_dim]) for _ in range(self.encoder_hidden_layers)])
# pointer
self.pointer_linear = torch.nn.Linear(self.dim, 2)
# self.pointer_softmax = torch.nn.Softmax(dim=-2)
def init_positon_embedding(self, max_postion, pos_dim):
posi_embedding = torch.Tensor(max_postion, pos_dim)
# posi_embedding = torch.nn.init.kaiming_normal(posi_embedding, a=math.sqrt(5), mode='fan_in', nonlinearity='leaky_relu')
self.position_embedding = torch.nn.Parameter(posi_embedding)
torch.nn.init.kaiming_normal_(self.position_embedding, mode='fan_out')
    def embedding(self, input_ids, segment_ids):
        """
        Embedding for input: BERT output plus (optional) learned position
        embedding, layer-normalised, with an optional depthwise-separable
        conv over the sequence.
        Args:
            input_ids: token id tensor; assumed (batch_size, seq_len) — TODO confirm.
            segment_ids: segment id tensor, same shape as input_ids.
        Returns:
            Tensor of shape (batch_size, seq_len, dim).
        """
        embeddings, _ = self.bert(input_ids, segment_ids)
        if self.use_position_embedding:
            # Broadcast add of the learned (max_postion, pos_dim) table.
            embeddings = embeddings + self.position_embedding
        # batch_size, length, dim
        embeddings = self.layer_normal(embeddings)
        if self.use_conv:
            # Append a singleton axis and move it to the channel slot so the
            # 2D depthwise-separable conv sees (batch, 1, dim, seq) planes,
            # then undo both transforms afterwards.
            embeddings = embeddings.unsqueeze(-1)
            embeddings = embeddings.permute(0, 3 ,2, 1)
            embeddings = self.conv(embeddings)
            embeddings = embeddings.permute(0, 3, 2, 1)
            embeddings = embeddings.squeeze(-1)
        # embeddings = self.dropout(embeddings)
        return embeddings
    def encoder(self, embeddings, input_mask):
        """Run the stack of attention + feed-forward encoder blocks.

        Each block: self-attention, linear + ReLU, intermediate linear +
        ReLU, output linear + ReLU, residual add of the block's input,
        then LayerNorm.

        Args:
            embeddings: (batch_size, seq_len, dim) input representations.
            input_mask: mask forwarded to every attention layer.
        Returns:
            (batch_size, seq_len, dim) encoded representations.
        """
        prelayer_output = embeddings
        for index in range(self.encoder_hidden_layers):
            # batchsize, sequence_length, posi_duim
            embeddings = self.attention_layer[index](embeddings, embeddings, input_mask)
            embeddings = self.encoder_linear_1[index](embeddings)
            embeddings = torch.relu(embeddings)
            embeddings = self.encoder_line_intermidia[index](embeddings)
            # embeddings = gelu(embeddings)
            embeddings = torch.relu(embeddings)
            embeddings = self.encoder_line_2[index](embeddings)
            embeddings = torch.relu(embeddings)
            # Residual connection back to this block's input (in-place add).
            embeddings += prelayer_output
            # todo: dropout、 normal
            embeddings = self.encoder_normal[index](embeddings)
            # embeddings = functional.leaky_relu(embeddings)
            # embeddings = functional.dropout(embeddings, self.encoder_dropout_prob, self.training)
            prelayer_output = embeddings
        return embeddings
    def pointer(self, embeddings, input_mask):
        """Project tokens to start/end scores and mask invalid positions.

        Args:
            embeddings: (batch_size, seq_len, dim) encoder output.
            input_mask: validity mask; applied via the project-level mask()
                helper — presumably neutralises padded positions so they
                cannot be selected as answer boundaries (confirm helper).
        Returns:
            (start_scores, end_scores), each of shape (batch_size, seq_len).
        """
        # size: batch_size, seq_length, 2
        embeddings = self.pointer_linear(embeddings)
        embeddings = mask(embeddings, input_mask, -2)
        start_embeddings = embeddings[:, :, 0].squeeze(dim=-1)
        end_embeddings = embeddings[:, :, 1].squeeze(dim=-1)
        return start_embeddings, end_embeddings
        # embeddings = self.pointer_softmax(embeddings)
        # start_softmax = embeddings[:,:,0]
        # end_softmax = embeddings[:,:,1]
        # start, end, pro = find_max_proper_batch(start_softmax, end_softmax)
        # return start, end, pro
def forward(self, input_ids, input_mask, segment_ids):
embedding = self.embedding(input_ids, segment_ids)
embedding = self.encoder(embedding, input_mask)
start, end = self.pointer(embedding, input_mask)
return start, end
if __name__ == "__main__":
    # Smoke test: run a local BERT forward pass on a tiny fake batch.
    input = torch.Tensor([[1,23,3],[4,5,6]]).long()
    segment = torch.Tensor([[1,1,0], [1,0,1]]).long()
    # NOTE(review): hard-coded absolute Windows path; parameterise before sharing.
    local_bert = LocalBert(r"F:\Study\Github\QANet-pytorch\data\model\bert\config.json",
                           True)
    local_bert(input, segment)
|
def square_of_the_sum(N):
    """Return the square of the sum 1 + 2 + ... + N.

    Uses integer arithmetic: N*(N+1) is always even, so // is exact and
    avoids the float rounding the original `/ 2` introduced for large N.
    """
    return (N * (N + 1) // 2) ** 2
def sum_of_the_squares(N):
    """Return 1^2 + 2^2 + ... + N^2 via the closed form N(N+1)(2N+1)/6.

    The numerator is always divisible by 6, so integer // is exact and
    avoids the float rounding the original `/ 6` introduced for large N.
    """
    return N * (N + 1) * (2 * N + 1) // 6
def difference(N):
    """Print and return (square of the sum) minus (sum of the squares)
    for the first N natural numbers (Project Euler #6)."""
    result = square_of_the_sum(N) - sum_of_the_squares(N)
    print("diff is: ", result)
    return result
difference(100)
|
# This is the code for experiments performed on the Eitz Sketches dataset for the DeLiGAN model. Minor adjustments
# in the code as suggested in the comments can be done to test GAN. Corresponding details about these experiments
# can be found in section 5.5 of the paper and the results showing the outputs can be seen in Fig 6 and Table 2,3.
import argparse
import cPickle
import time
import numpy as np
import theano as th
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams
import lasagne
import lasagne.layers as ll
from lasagne.init import Normal
from lasagne.layers import dnn
import nn
import sys
import plotting
import input_data_gan
# settings
parser = argparse.ArgumentParser()
# NOTE(review): --seed and --batch_size have no type=int, so values passed on
# the command line arrive as strings; only the defaults are ints.
parser.add_argument('--seed', default=1)
parser.add_argument('--batch_size', default=100)
parser.add_argument('--unlabeled_weight', type=float, default=1.)
parser.add_argument('--learning_rate', type=float, default=0.0001)
parser.add_argument('--data_dir', type=str, default='../datasets/sketches/')
parser.add_argument('--results_dir', type=str, default='../results/sketches/')
parser.add_argument('--count', type=int, default=400)
args = parser.parse_args()
# Base channel widths for the generator and discriminator.
gen_dim = 40
disc_dim = 20
print(args)
# fixed random seeds
rng = np.random.RandomState(args.seed)
theano_rng = MRG_RandomStreams(rng.randint(2 ** 15))
lasagne.random.set_rng(np.random.RandomState(rng.randint(2 ** 15)))
# load sketches
data = input_data_gan.read_data_sets(args.data_dir,one_hot=True).train
trainx = data._images
print("trainx_Shape",trainx.shape)
# Reshape to NCHW (1x32x32 sketches); the *2-1 rescale maps pixels to [-1,1]
# to match the generator's tanh output (assumes inputs in [0,1] — confirm).
trainx = trainx.reshape([-1,1,32,32])
trainx = trainx*2.-1
ind = rng.permutation(trainx.shape[0])
trainx = trainx[ind]
nr_batches_train = int(trainx.shape[0]/args.batch_size)
# specify generative model
noise_dim = (args.batch_size, 100)
# DeLiGAN mixture-of-Gaussians latent reparameterisation: learned means Z and
# scales sig (initialised at a constant 0.2) applied to unit-normal noise.
Z = th.shared(value=rng.uniform(-1.0,1.0,noise_dim).astype(np.float32), name='Z', borrow=True)
sig = th.shared(value=rng.uniform(0.2, 0.2,noise_dim).astype(np.float32), name='sig', borrow=True)
noise = theano_rng.normal(size=noise_dim)
gen_layers = [ll.InputLayer(shape=noise_dim, input_var=noise)]
gen_layers.append(nn.MoGLayer(gen_layers[-1], noise_dim=noise_dim, z=Z, sig=sig)) # Comment this line when testing/training baseline GAN model
gen_layers.append(nn.batch_norm(ll.DenseLayer(gen_layers[-1], num_units=4*4*gen_dim*4, W=Normal(0.05), nonlinearity=nn.relu), g=None))
gen_layers.append(ll.ReshapeLayer(gen_layers[-1], (args.batch_size,gen_dim*4,4,4)))
gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size,gen_dim*2,8,8), (5,5), W=Normal(0.05), nonlinearity=nn.relu), g=None)) # 4 -> 8
gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size,gen_dim,16,16), (5,5), W=Normal(0.05), nonlinearity=nn.relu), g=None)) # 8 -> 16
gen_layers.append(nn.weight_norm(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size,1,32,32), (5,5), W=Normal(0.05), nonlinearity=T.tanh), train_g=True, init_stdv=0.1)) # 16 -> 32
gen_dat = ll.get_output(gen_layers[-1])
# specify discriminative model
disc_layers = [ll.InputLayer(shape=(None, 1, 32, 32))]
disc_layers.append(ll.GaussianNoiseLayer(disc_layers[-1], sigma=0.2))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1],disc_dim,(5,5), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], disc_dim, (5,5), pad=1, stride=2, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1],disc_dim*2,(5,5), pad=1, stride=2, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1],disc_dim*4,(3,3),pad=0, stride=2, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(ll.NINLayer(disc_layers[-1], num_units=disc_dim*4, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(ll.GlobalPoolLayer(disc_layers[-1]))
# Minibatch discrimination helps the discriminator detect mode collapse.
disc_layers.append(nn.MinibatchLayer(disc_layers[-1], num_kernels=50))
disc_layers.append(nn.weight_norm(ll.DenseLayer(disc_layers[-1], num_units=1, W=Normal(0.05), nonlinearity=None), train_g=True, init_stdv=0.1))
# costs
x_lab = T.tensor4()
temp = ll.get_output(gen_layers[-1], deterministic=False, init=True)
temp = ll.get_output(disc_layers[-1], x_lab, deterministic=False, init=True)
init_updates = [u for l in gen_layers+disc_layers for u in getattr(l,'init_updates',[])]
D_logit = ll.get_output(disc_layers[-1], x_lab, deterministic=False)
D_prob = T.nnet.sigmoid(D_logit)
D_fake_logit = ll.get_output(disc_layers[-1], gen_dat, deterministic=False)
D_fake_prob = T.nnet.sigmoid(D_fake_logit)
sig1 = gen_layers[1].get_sig() # Comment this line when training/testing the baseline GAN Model
# sigma regularizer
sigloss =T.mean((1-sig1)*(1-sig1))*.05 # Comment this line when training/testing the baseline GAN Model
#sigloss = th.shared(value=rng.uniform(0,0), name='sigloss', borrow=True) # Uncomment this line when training/testing the baseline GAN Model
#sig1 = th.shared(value=rng.uniform(0.2,0.2,noise_dim), name='sig1', borrow=True) # Uncomment this line when training/testing the baseline GAN Model
loss_real = T.mean(T.nnet.binary_crossentropy(D_prob,T.ones_like(D_prob)))
loss_fake = T.mean(T.nnet.binary_crossentropy(D_fake_prob,T.zeros_like(D_fake_prob)))
# Theano functions for training the disc net
lr = T.scalar()
disc_params = ll.get_all_params(disc_layers, trainable=True)
disc_param_updates = nn.adam_updates(disc_params, loss_real + args.unlabeled_weight*loss_fake, lr=lr, mom1=0.5)
disc_param_avg = [th.shared(np.cast[th.config.floatX](0.*p.get_value())) for p in disc_params]
disc_avg_updates = [(a,a+0.0001*(p-a)) for p,a in zip(disc_params,disc_param_avg)]
disc_avg_givens = [(p,a) for p,a in zip(disc_params,disc_param_avg)]
init_param = th.function(inputs=[x_lab], outputs=None, updates=init_updates)
train_batch_disc = th.function(inputs=[x_lab,lr], outputs=[loss_real,loss_fake], updates=disc_param_updates+disc_avg_updates)
samplefun = th.function(inputs=[],outputs=gen_dat)
# Theano functions for training the gen net
loss_gen = T.mean(T.nnet.binary_crossentropy(D_fake_prob,T.ones_like(D_fake_prob)))
gen_params = ll.get_all_params(gen_layers[-1],trainable=True)
gen_param_updates = nn.adam_updates(gen_params, loss_gen + sigloss, lr=lr, mom1=0.5)
train_batch_gen = th.function(inputs=[lr], outputs=[sig1,sigloss,loss_gen], updates=gen_param_updates)
batch_gen = th.function(inputs=[], outputs=[sig1,sigloss,loss_gen],updates=None)
# Uncomment this block when generative samples from a pretrained model
'''
f = np.load(args.results_dir + '/train/disc_params3850.npz')
param_values = [f['arr_%d' % i] for i in range(len(f.files))]
for i,p in enumerate(disc_params):
p.set_value(param_values[i])
print("disc_params fed")
f =np.load(args.results_dir + '/train/gen_params3850.npz')
param_values = [f['arr_%d' % i] for i in range(len(f.files))]
for i,p in enumerate(gen_params):
p.set_value(param_values[i])
print("gen_params fed")
samples=[]
for i in range(500):
sample_x = samplefun()
samples.append(sample_x)
samples = np.concatenate(samples,0)
print(samples)
#sys.exit()
np.save(args.results_dir + '/DE_samples50k.npy',samples)
print("samples saved")
sys.exit()
'''
# select labeled data
inds = rng.permutation(trainx.shape[0])
trainx = trainx[inds]
a = []
# count1/count2 track consecutive generator/discriminator wins; thres is the
# generator-loss threshold that adaptively decides which net trains next.
count1=0
count2=0
t1=0.70
thres=1.0
# //////////// perform training //////////////
for epoch in range(3900):
    begin = time.time()
    # Learning rates decay linearly after epoch 2600 (min(3 - epoch/1300, 1));
    # the discriminator uses half the generator's rate.
    lr = np.cast[th.config.floatX](args.learning_rate * np.minimum(3.-epoch/1300., 1.))
    lrd = np.cast[th.config.floatX](args.learning_rate*0.5* np.minimum(3. - epoch/1300., 1.))
    if epoch==0:
        init_param(trainx[:400]) # data based initialization
    loss_lab = 0
    loss_unl = 0
    train_err = 0
    # train
    for t in range(nr_batches_train):
        sigm , sigmloss, genloss = batch_gen()
        if count1>5:
            thres=min(thres+0.003,1.0)
            count1=0
        # NOTE(review): `ll` here shadows the `lasagne.layers as ll` import
        # alias; it is not used as the module again below, but rename it
        # (e.g. l_real) if this loop is ever extended.
        ll, lu =train_batch_disc(trainx[t*args.batch_size:(t+1)*args.batch_size],lrd)
        #print('gen',thres)
        if count2<-1:
            thres=max(thres-0.003, t1)
            count2=0
        #print('disc', thres)
        # Up to 5 extra steps: train the generator while its loss exceeds
        # thres, otherwise give the discriminator another step. (xrange:
        # this script is Python 2.)
        for k in xrange(5):
            if(genloss>thres):
                sigm, sigmloss, genloss = train_batch_gen(lr)
                count1+=1
                count2=0
            else:
                ll, lu = train_batch_disc(trainx[t*args.batch_size:(t+1)*args.batch_size],lr)
                sigm, sigmloss, genloss = batch_gen()
                count1=0
                count2-=1
        loss_lab = ll
        loss_unl = lu
    print("Iteration %d, time = %ds, loss_real = %.4f, loss_fake = %.4f,loss_gen= = %.4f, sigloss = %.4f" %
          (epoch, time.time()-begin, loss_lab, loss_unl,genloss,sigmloss))
    sys.stdout.flush()
    a.append([epoch, loss_lab, loss_unl, genloss, sigmloss])
    # generate samples from the model
    sample_x = samplefun()
    img_bhwc = np.transpose(sample_x[:100,], (0, 2, 3, 1))
    # Nearest training neighbour of every sample (L2 over all pixels), shown
    # beside the samples to judge memorisation.
    NNdiff = np.sum(np.sum(np.sum(np.square(np.expand_dims(sample_x,axis=1)-np.expand_dims(trainx,axis=0)),axis=2),axis=2),axis=2)
    NN = trainx[np.argmin(NNdiff,axis=1)]
    NN = np.transpose(NN[:100], (0, 2, 3, 1))
    img_tile = plotting.img_tile(img_bhwc, aspect_ratio=1.0, border_color=1.0, stretch=True)
    NN_tile = plotting.img_tile(NN, aspect_ratio=1.0, border_color=1.0,stretch=True)
    img_tile = np.concatenate((img_tile,NN_tile),axis=1)
    img_tile = img_tile.reshape(img_tile.shape[0],img_tile.shape[1])
    img = plotting.plot_img(img_tile, title='sketch samples')
    plotting.plt.savefig(args.results_dir + '/bg_sketch_sample_minibatch.png')
    # Checkpoint params/curves every 50 epochs.
    if epoch%50==0:
        plotting.plt.savefig(args.results_dir + '/'+str(epoch)+'.png')
        # save params
        np.savez(args.results_dir + '/train/disc_params' + str(epoch) + '.npz',*[p.get_value() for p in disc_params])
        np.savez(args.results_dir + '/train/gen_params'+ str(epoch) + '.npz',*[p.get_value() for p in gen_params])
        np.save(args.results_dir + '/train/errors.npy',a)
        np.save(args.results_dir + '/train/sig.npy',sigm)
    plotting.plt.close('all')
|
import contextlib
import csv
import math
from datetime import datetime
from typing import Generator, Tuple
import numpy as np
from scipy import spatial
Point = Tuple[float, float]
def _load_data() -> Generator[Tuple[float, float], None, None]:
with open('coordinates.csv', 'r') as fp:
reader = csv.reader(fp)
next(reader, None) # Skip header
for line in reader:
yield float(line[0]), float(line[1]) # Don't return the name
def distance(x: "Point", y: "Point") -> float:
    """Return the planar Euclidean distance between points *x* and *y*.

    Bug fix: the original mixed up point/coordinate indexing and computed
    sqrt((x0-x1)^2 + (y0-y1)^2) — a function of each point's own two
    coordinates — instead of the distance between the two points,
    sqrt((x0-y0)^2 + (x1-y1)^2).

    (Haversine would be more accurate, but over ~10 km the flat-earth
    approximation is fine, as the original comment notes.)
    """
    # https://www.cuemath.com/euclidean-distance-formula/
    return math.sqrt(
        (x[0] - y[0]) ** 2
        + (x[1] - y[1]) ** 2
    )
# Load every point once and precompute the full pairwise distance matrix
# (num_points x num_points, symmetric) so cost() is a pure array lookup.
coordinates = np.array(list(_load_data()))
num_points = len(coordinates)
distance_matrix = spatial.distance.cdist(coordinates, coordinates, metric='euclidean')
def write_gps_file(coords: np.ndarray, name):
    """Serialize *coords* (rows of [lat, lon]) as a single GPX route in
    ./{name}.gpx."""
    header = [
        '<?xml version="1.0" encoding="utf-8"?>\n',
        '<gpx version="1.1" creator="Me, myself and I">\n',
        ' <rte>\n',
        f' <name>{name}</name>\n',
        ' <number>0</number>\n',
    ]
    points = [f' <rtept lat="{coord[0]}" lon="{coord[1]}"/>\n' for coord in coords]
    footer = [' </rte>\n', '</gpx>\n']
    with open(f'{name}.gpx', 'w') as fp:
        fp.writelines(header + points + footer)
def default_route():
    """Identity route over every point, closed by returning to point 0
    (we want to start and stop at the same place)."""
    closed_loop = np.append(np.arange(num_points), 0)
    return closed_loop
@contextlib.contextmanager
def check_time():
    """Context manager: print the default route's length on entry and the
    elapsed wall-clock time on exit (even if the body raises)."""
    print("Length of default route:", cost(default_route()))
    start = datetime.now()
    try:
        yield
    finally:
        stop = datetime.now()
        print("Time taken:", stop - start)
def cost(route):
    """Total route length: sum of distances between consecutive stops.

    np.roll pairs each stop with its predecessor (wrapping around), so the
    leg back from the last stop to the first is included.
    """
    previous_stops = np.roll(route, 1)
    return distance_matrix[previous_stops, route].sum()
|
from SocketServer import TCPServer, ThreadingMixIn, StreamRequestHandler
import ssl
from time import ctime
from kafka import KafkaProducer
# STEP to build socket server:
# 1 create handle class for handling the received request
# 2 create a server class (TCP,UDP...)
# 3 use server class to run the serve_forever() to process the request
# 4 if multiple thread needed, use ThreadingMaxIn
class Handler(StreamRequestHandler): # step 1 create handle class for handling the received request
    """Per-connection handler: acks each message back to the client with a
    timestamp and forwards it to Kafka as "<topic> <message>" (Python 2)."""
    def handle(self):
        print 'connect from:', self.client_address
        while True: # request processing, most of the data send from client side will be processed here
            text = self.request.recv(1024)
            self.request.sendall('[%s] %s' % (ctime(),text)) # return the time-stamp and massage to client to indicate massage had been successfuly sent
            # NOTE(review): split() raises ValueError unless the payload is
            # exactly two whitespace-separated tokens; empty recv (client
            # disconnect) also ends up here — confirm intended behaviour.
            topic,massage= text.split()
            producer.send(topic,value=massage.encode()) # kafka producer send data to kafka broker
            producer.flush()
            print(text)
class TCP_SSL_SERVER(TCPServer): # step 2 as SSL needed so rewrite the TCP server class with SSL
    """TCPServer variant that wraps every accepted connection in TLS."""
    def __init__(self, server_address, RequestHandlerClass, certfile, keyfile, ssl_version=ssl.PROTOCOL_TLSv1, bind_and_activate=True):
        # Keep the cert/key paths and protocol for use in get_request().
        TCPServer.__init__(self, server_address, RequestHandlerClass, bind_and_activate)
        self.certfile = certfile # give cert file variables to self and using in request rewrite
        self.keyfile = keyfile
        self.ssl_version = ssl_version
    def get_request(self): # Rewrite the request method by using ssl.wrap_socket
        # NOTE(review): PROTOCOL_TLSv1 and ssl.wrap_socket are deprecated;
        # consider ssl.SSLContext.wrap_socket with a modern protocol.
        socket, addr = self.socket.accept()
        connection = ssl.wrap_socket(socket, server_side=True, certfile = self.certfile, keyfile = self.keyfile, ssl_version = self.ssl_version)
        return connection, addr
class SSL_ThreadingTCPServer(ThreadingMixIn, TCP_SSL_SERVER): pass # step 4 if multiple thread needed, use ThreadingMaxIn
# Kafka producer shared by all handler threads; acks='all' waits for full
# replication before a send is considered successful.
producer = KafkaProducer(bootstrap_servers="192.168.31.135:9092",retries=3,acks='all',max_block_ms=100000) # create Kafka producer
print 'waiting for connection...'
SSL_ThreadingTCPServer(('192.168.31.135',9000),Handler,"/home/cz/work/python/mycertfile.pem","/home/cz/work/python/mykeyfile.pem").serve_forever()
# step 3 run multiple thread class with a ThreadingMixIn with server class
|
import boto3
import random
import string
# AWS clients
# Created at module level so warm Lambda invocations reuse the client.
transcribe = boto3.client('transcribe')
def lambda_handler(event, context):
    """Start an AWS Transcribe job for an MP4 object uploaded to S3.

    Triggered by an S3 put event; the transcript is written back to the
    same bucket.

    Returns:
        dict with statusCode 200 once the job has been submitted.
    """
    # Initialization
    bucket = event['Records'][0]['s3']['bucket']['name']
    # NOTE(review): S3 event keys are URL-encoded; keys containing spaces or
    # special characters may need urllib.parse.unquote_plus — confirm.
    file = event['Records'][0]['s3']['object']['key']
    transcription_job = ''.join(
        [random.choice(string.ascii_letters + string.digits) for n in range(32)])
    # TRANSCRIBE #
    # Use the 32-char random string directly as the job name: the previous
    # str(hash(file + suffix)) was process-salted (not reproducible across
    # runs), could be negative, and collapsed the randomness generated above.
    transcribe.start_transcription_job(
        TranscriptionJobName=transcription_job,
        LanguageCode='en-US',
        MediaFormat='mp4',
        Media={
            'MediaFileUri': 'https://{}.s3.amazonaws.com/{}'.format(bucket,
                                                                    file)
        },
        OutputBucketName=bucket
    )
    return {
        'statusCode': 200
    }
|
import argparse
import os
import torchvision.transforms as transforms
from src.datamanager import *
from src.datamanager import DataProvider
import src.datamanager.utils as datautils
from PIL import Image
from src.configs import *
from src.ml.net import PyNet
from src.results import performance
from src.results.reid import ReIDPerformance
import torchvision.transforms.functional as F
from src.ml.net.pt import factory as model_factory
from operator import itemgetter
from src.visualization import visualizer
import src.pyrnet.model as reid_model
import src.pyrnet.features as features
import src.pyrnet.metric as metric
# Arg parser
parser = argparse.ArgumentParser(description='ReID Net')
parser.add_argument('--dataset', default='Market-1501', type=str, metavar='STR', help='dataset name (default: Market-1501)')
parser.add_argument('-j', '--workers', default=10, type=int, metavar='N', help='number of data loading workers (default: 10)')
parser.add_argument('-b', '--batch-size', default=256, type=int, metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--print-freq', '--p', default=20, type=int, metavar='N', help='print frequency (default: 20)')
parser.add_argument('--net', default='densenet', type=str, metavar='STR', help='network model (default: densenet)')
parser.add_argument('--depth', default=201, type=int, metavar='N', help='network model depth (default: 201)')
parser.add_argument('--bottleneck-size', default=512, type=int, metavar='N', help='classifier bottleneck size (default: 512)')
parser.add_argument('--pyr-feature-size', default=256, type=int, metavar='N', help='pyramidal maps (default: 256)')
# NOTE(review): argparse type=bool does not parse "False" — any non-empty
# string becomes True; consider action='store_true' for the bool flags here.
parser.add_argument('--pyr-feature-size-dynamic', default=True, type=bool, metavar='B', help='pyramidal feature size dependent on detail level (default: True)')
parser.add_argument('--pyr-operator', default='max_pool', type=str, metavar='STR', help='pyramidal operator (default: max_pool)')
parser.add_argument('--pyr-levels', default=-1, type=int, metavar='N', help='pyramidal levels (default: -1 => dynamic)')
parser.add_argument('--metric', default='euclidean', type=str, metavar='STR', help='metric (default: euclidean')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH', help='filename of latest checkpoint (default: empty => latest experiment)')
parser.add_argument('--epoch', default=100, type=int, metavar='N', help='evaluation epoch, used only if --checkpoint is not set (default: 100)')
parser.add_argument('--rerank', default=False, type=bool, metavar='B', help='enable re-ranking (default: False)')
def get_args():
    """Parse and return the command-line arguments defined on `parser`."""
    parsed = parser.parse_args()
    return parsed
""" ================================================================================================================
    EVALUATION
============================================================================================================ """
def evaluate(args, net=None, dset_train=None, dset_test=None,
             display_ranking_image_index=(0, 2, 10, 40, 60, 100, 120, 140, 160, 180, 200),
             layer_embeddings=('emb\\bottleneck1', 'emb\\bottleneck2', 'emb\\bottleneck3', 'emb\\bottleneck4'),
             sample_size=(384, 192)):
    """Evaluate a (trained) ReID network on the test split of a dataset.

    Extracts embeddings for the original and horizontally-flipped test
    images, matches probes against the gallery, and reports CMC / nAUC /
    mAP (optionally with re-ranking) plus ranked matching-image panels.

    Args:
        args: parsed CLI namespace (see the parser above).
        net: optional pre-built PyNet; loaded from a checkpoint when None.
        dset_train, dset_test: optional pre-split datasets; loaded when None.
        display_ranking_image_index: probe indexes whose rankings are displayed.
        layer_embeddings: names of the embedding layers to extract.
        sample_size: (height, width) network input resolution.
    Returns:
        (reid_perf, reid_perf_rerank): ReIDPerformance objects; the second
        one is left unpopulated when re-ranking is disabled.
    """
    # Just check the parsed arguments
    print(vars(args))
    """ ----------------------------------------------------------------------------------------------------------------
        DATA
    ------------------------------------------------------------------------------------------------------------ """
    # Imagenet Normalization
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    # Data transformations
    transformations = DataTransformer([
        transforms.Resize(sample_size, interpolation=Image.BICUBIC),
        transforms.ToTensor(),
        normalize
    ])
    # Same pipeline with a horizontal flip; features from both views are used.
    transformations_flipped = DataTransformer([
        transforms.Resize(sample_size, interpolation=Image.BICUBIC),
        transforms.Lambda(lambda x: F.hflip(x)),
        transforms.ToTensor(),
        normalize])
    # Dataset
    if dset_train is None or dset_test is None:
        dset_opts = DatasetConfig(args.dataset, None, (0.5, 0.5), cam_pair=(-1, -1))
        dset = DatasetReID(dset_opts.name, os.path.join('data', dset_opts.name),
                           im_size=dset_opts.imsize, in_memory=False, keep_aspect_ratio=True)
        # Splits
        dset_train, dset_test = dset.split(dset_opts.split, save_load=True, make_each_split_contiguous=True)
    # Data provider
    data_provider = DataProvider(dset_test, loader=datautils.load_image, transform=transformations)
    num_classes = len(dset_train.classes)
    # Data provider flipped
    data_provider_flipped = DataProvider(dset_test, loader=datautils.load_image, transform=transformations_flipped)
    """ ----------------------------------------------------------------------------------------------------------------
        MODEL
    ------------------------------------------------------------------------------------------------------------ """
    if net is None:
        # From which checkpoint do we need to load the model?
        checkpoint = args.checkpoint
        if checkpoint == '':
            # Fall back to the most recent experiment folder for this dataset.
            folder = os.path.join('data', 'experiments', args.dataset, os.listdir(os.path.join('data', 'experiments', args.dataset))[-1])
            checkpoint = os.path.join(folder, 'checkpoint_epoch-{}.pth.tar'.format(args.epoch))
        folder = os.path.dirname(checkpoint)
        # Get model (load it from checkpoint!)
        model = reid_model.get_model(args.net, args.depth,
                                     data_provider[0][0].size(), num_classes,
                                     bottleneck_size=args.bottleneck_size,
                                     pyr_feature_size=args.pyr_feature_size,
                                     pyr_operator=args.pyr_operator, pyr_feature_size_dynamic=args.pyr_feature_size_dynamic,
                                     checkpoint_path=checkpoint)
        # Make it parallel..
        model = model_factory.make_it_parallel(model, 'multigpu')
        # Net initialization
        net = PyNet()
        net.model = model
        net.exp_folder = folder
        # Move to GPU (if available)
        net.to_gpu()
    """ ----------------------------------------------------------------------------------------------------------------
        FEATURES
    ------------------------------------------------------------------------------------------------------------ """
    X_norm = []
    data_providers = [data_provider, data_provider_flipped]
    # Get features from the data providers
    for ii, dp in enumerate(data_providers):
        X_norm_new = features.get_features(net, [dp], layer_embeddings=layer_embeddings, batch_size=args.batch_size, workers=args.workers)
        # Concat
        X_norm.extend(X_norm_new)
    """ ----------------------------------------------------------------------------------------------------------------
        MATCH
    ------------------------------------------------------------------------------------------------------------ """
    # Match images (re-rank if needed)
    D, D_rerank, probe_info, gallery_info = metric.get_distance(dset_test, X_norm, args.metric, re_rank=args.rerank)
    # Unpack matching info
    probe_idx, probe_id, probe_cam = probe_info
    gallery_idx, gallery_id, gallery_cam = gallery_info
    """ ----------------------------------------------------------------------------------------------------------------
        PERFORMANCE
    ------------------------------------------------------------------------------------------------------------ """
    # CMC (scores are negated distances: higher = better match)
    reid_perf = ReIDPerformance()
    reid_perf.compute(-D, probe_idx, gallery_idx,probe_id, gallery_id, probe_cam=probe_cam, gallery_cam=gallery_cam)
    data_to_print = [reid_perf.cmc[0], reid_perf.cmc[4], reid_perf.cmc[9], reid_perf.cmc[19], reid_perf.cmc[49], reid_perf.nauc, reid_perf.ap.mean()*100]
    res_string = 'CMC [1-5-10-20-50]: {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} -- nAUC: {:.2f} -- mAP: {:.2f}'.format(*data_to_print)
    print(res_string)
    # CMC plot
    visualizer.plot_cmc(reid_perf.cmc, legend='Rank-1: {:.2f} - mAP: {:.2f}'.format(reid_perf.cmc[0], reid_perf.ap.mean()*100), title=str(layer_embeddings), render_on_screen=True)
    reid_perf_rerank = ReIDPerformance()
    if D_rerank is not None:
        # CMC with rerank
        reid_perf_rerank.compute(-D_rerank, probe_idx, gallery_idx,probe_id, gallery_id, probe_cam=probe_cam, gallery_cam=gallery_cam)
        data_to_print = [reid_perf_rerank.cmc[0], reid_perf_rerank.cmc[4], reid_perf_rerank.cmc[9], reid_perf_rerank.cmc[19], reid_perf_rerank.cmc[49], reid_perf_rerank.nauc, reid_perf_rerank.ap.mean()*100]
        res_string = 'Re-Rank => CMC [1-5-10-20-50]: {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} -- nAUC: {:.2f} -- mAP: {:.2f}'.format(*data_to_print)
        print(res_string)
        img = visualizer.plot_cmc(reid_perf_rerank.cmc, legend='Rank-1: {:.2f} - mAP: {:.2f}'.format(reid_perf_rerank.cmc[0], reid_perf_rerank.ap.mean()*100), title=str(layer_embeddings), render_on_screen=True)
    # Matching images
    dp = DataProvider(dset_test, loader=datautils.load_image)
    matching_images = performance.get_matching_images(dp, dp, reid_perf.matching_indexes, N=15, selected_indexes=display_ranking_image_index)
    matching_ids = itemgetter(*display_ranking_image_index)(reid_perf.matching_ids)
    visualizer.display_ranked_matching_images(matching_images, matching_ids=matching_ids, im_size=(256, 256), render_on_screen=True, true_match_line_width=10)
    return reid_perf, reid_perf_rerank
if __name__ == '__main__':
    # Script entry point: evaluate with the CLI-provided settings.
    args = get_args()
    evaluate(args)
|
#!/usr/bin/env python
# Copyright libOpenCOR contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Do memory checks using Memcheck from Valgrind.
# Note: this script is based on the runMemcheck.py script that used to be part
# of KDevelop (see
# https://invent.kde.org/kdevelop/kdevelop/-/blob/3973/veritas/tests/runMemcheck.py).
import math
import multiprocessing
import os
import shutil
import sys
import xml.dom.minidom
def child_node_data(dom, tag):
    """Return the text content of the first <tag> element under *dom*,
    or None when the tag is absent.

    Also returns None when the element exists but is empty (no text
    child), which previously raised AttributeError on `firstChild.data`.
    """
    elements = dom.getElementsByTagName(tag)
    if elements and elements[0].firstChild is not None:
        return elements[0].firstChild.data
    return None
class Frame:
    """One stack frame parsed from a Valgrind <frame> XML element."""
    def __init__(self, frame_node):
        # Each field may be None when Valgrind omitted the information.
        self.function = child_node_data(frame_node, "fn")
        self.file = child_node_data(frame_node, "file")
        self.line = child_node_data(frame_node, "line")
    def __str__(self):
        parts = []
        if self.function:
            parts.append(" " + self.function)
        if self.file and self.line:
            parts.append(" (" + self.file + ":" + self.line + ")")
        parts.append("\n")
        return "".join(parts)
class BackTrace:
    """One Valgrind <error> entry: its kind plus the named stack frames."""
    def __init__(self, error_node):
        self.kind = error_node.getElementsByTagName("kind")[0].firstChild.data
        self.stack = []
        # Keep only frames that have a function name; anonymous frames are noise.
        for frame in error_node.getElementsByTagName("frame"):
            if child_node_data(frame, "fn"):
                self.stack.append(Frame(frame))
    def is_leak(self):
        # Report only definite leaks that originate in our own code (a test
        # body or a libOpenCOR:: symbol); everything else is ignored.
        if self.kind != "Leak_DefinitelyLost":
            return False
        for frame in self.stack:
            if "::TestBody" in frame.function or "libOpenCOR::" in frame.function:
                return True
        return False
    def __str__(self):
        out = " Traceback (most recent call first):\n"
        for frame in self.stack:
            out += str(frame)
        return out
def parse_errors(output):
    """Parse Valgrind XML *output* and return the BackTraces that qualify
    as reportable leaks (see BackTrace.is_leak)."""
    dom = xml.dom.minidom.parseString(output)
    traces = (BackTrace(node) for node in dom.getElementsByTagName("error"))
    return [trace for trace in traces if trace.is_leak()]
def garbage(line):
    """Filter predicate: True for lines worth keeping from the raw XML.

    Drops the "<unknown program name>" and "profiling:" noise lines that
    instrumented binaries sometimes emit into the report stream.
    startswith() accepts a tuple, so one call covers both prefixes.
    """
    return not line.startswith(("<unknown program name>", "profiling:"))
def memcheck(valgrind, test, test_path):
    """Run *test_path* under Valgrind memcheck and return its XML report.

    stdout/stderr of the test go to {test}.txt / {test}.err; the XML
    report is written via fd 3 to {test}.xml, then read back with the
    noise lines filtered out (see garbage()).
    """
    os.system(
        valgrind
        + f" --tool=memcheck --child-silent-after-fork=yes --leak-check=full --xml=yes --xml-fd=3 --num-callers=50 {test_path} 1>{test}.txt 2>{test}.err 3>{test}.xml"
    )
    # Use a context manager so the report file handle is closed (the
    # original open(...).readlines() leaked it).
    with open(f"{test}.xml") as report:
        return "".join(filter(garbage, report.readlines()))
def run_test(valgrind, test, test_path):
    """Memcheck one test executable; print the outcome and return True on
    success (no reportable leaks), False otherwise."""
    sys.stdout.write(f"-- Checking memory in {test} - ")
    if not os.access(test_path, os.X_OK):
        sys.stdout.write("not found\n")
        return False
    errors = parse_errors(memcheck(valgrind, test, test_path))
    if errors:
        sys.stdout.write("Failed\n")
        for error in errors:
            sys.stderr.write(str(error))
        return False
    sys.stdout.write("Success\n")
    return True
if __name__ == "__main__":
    # Usage: runner.py <tests_dir> <test_1> [<test_2> ...]
    if len(sys.argv) > 2:
        valgrind = shutil.which("valgrind")
        if valgrind is None:  # was `== None`; identity check is the idiom
            sys.stderr.write("-- Valgrind could not be found.\n")
            sys.exit(3)
        exit_code = 0
        tests_dir = sys.argv[1]
        tests = sys.argv[2:]
        # Memcheck runs are independent, so fan them out across all cores.
        with multiprocessing.Pool(multiprocessing.cpu_count()) as process:
            results = process.starmap(
                run_test,
                [(valgrind, test, os.path.join(tests_dir, test)) for test in tests],
            )
        successes = []
        failures = []
        for index, result in enumerate(results):
            if result:
                successes.append(tests[index])
            else:
                failures.append(tests[index])
                exit_code = 2
        total = len(successes) + len(failures)
        sys.stdout.write("-- Summary:\n")
        sys.stdout.write(
            f" {math.ceil(100.0 * len(successes) / total)}% tests passed, {len(failures)} tests failed out of {total}.\n"
        )
        if failures:  # truthiness instead of len(...)
            sys.stdout.write("\n")
            sys.stdout.write(" The failed tests are:\n")
            for failure in failures:
                sys.stdout.write(f" - {failure}\n")
            sys.stdout.write("\n")
        sys.exit(exit_code)
    else:
        # Typos fixed in the usage text ("exectable"/"exectuable").
        sys.stderr.write(
            f"Usage: python3 {os.path.basename(sys.argv[0])} test_executable_dir test_executable_1 [test_executable_2 ...]"
        )
        sys.exit(1)
|
import json
import sys
# Make the parent directory importable so the sibling util/model modules resolve.
sys.path.append("..")
from util import Util
from model import ToolType
class SingletonMeta(type):
    """Metaclass that caches exactly one instance per class.

    The first call to the class constructs the instance; every later call
    returns that same cached object.
    """
    _instances = {}
    def __call__(cls, *args, **kwargs):
        try:
            return cls._instances[cls]
        except KeyError:
            cls._instances[cls] = super().__call__(*args, **kwargs)
            return cls._instances[cls]
class ToolTypeService(metaclass=SingletonMeta):
    """Singleton service that loads ToolType records from toolTypes.json."""
    def findAll(self):
        """Return a ToolType for every record in the toolTypes.json data file."""
        with open(Util.getDataFilePath("toolTypes.json"), 'r') as file:
            return [self.create(record) for record in json.load(file)]
    def create(self, json):
        """Build a ToolType from one JSON record; a falsy record yields an
        empty ToolType. (Parameter name `json` kept for caller
        compatibility, although it shadows the json module here.)"""
        toolType = ToolType()
        if not json:
            return toolType
        toolType.id = json["id"]
        toolType.type = json["type"]
        toolType.title = json["title"]
        toolType.fields = json["fields"]
        return toolType
|
from src.models.AbstractModel import AbstractModel
from keras.models import Sequential
from keras.layers import Dense, Lambda, Embedding
import keras.backend as K
from keras import optimizers
class Word2VecModel(AbstractModel):
    """CBOW-style word2vec model: average the context-word embeddings and
    predict the target word with a softmax over the vocabulary."""
    def __init__(self, save_directory, vocabulary_size, output_dim, input_length):
        """
        Args:
            save_directory: where AbstractModel persists the model.
            vocabulary_size: number of distinct tokens (softmax output size).
            output_dim: embedding vector dimension.
            input_length: number of context words per training sample.
        """
        super().__init__(save_directory)
        self.vocabulary_size = vocabulary_size
        self.output_dim = output_dim
        self.input_length = input_length
        self.build_model()
    def build_model(self):
        """Assemble and compile the Keras model; stores it on self.model."""
        model = Sequential()
        model.add(Embedding(input_dim = self.vocabulary_size, output_dim = self.output_dim, input_length = self.input_length))
        # Mean over the context-word axis yields one vector per sample (CBOW).
        model.add(Lambda(lambda x: K.mean(x, axis = 1), output_shape = (self.output_dim, )))
        model.add(Dense(self.vocabulary_size, activation = 'softmax'))
        # NOTE(review): `lr`/`decay` are the legacy Keras argument names;
        # newer Keras expects `learning_rate` — confirm the pinned version.
        adam = optimizers.Adam(lr = 0.001, beta_1 = 0.9, beta_2 = 0.999, epsilon = None, decay = 0, amsgrad = False)
        model.compile(loss = 'categorical_crossentropy', optimizer = adam, metrics=['categorical_accuracy'])
        self.model = model
        return self.model
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.