#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from classy_vision.generic.util import get_torch_version
from classy_vision.models import build_model
from test.generic.utils import compare_model_state
MODELS = {
"small_densenet": {
"name": "densenet",
"num_blocks": [1, 1, 1, 1],
"init_planes": 4,
"growth_rate": 32,
"expansion": 4,
"final_bn_relu": True,
"small_input": True,
"heads": [
{
"name": "fully_connected",
"unique_id": "default_head",
"num_classes": 1000,
"fork_block": "trunk_output",
"in_plane": 60,
"zero_init_bias": True,
}
],
},
"small_densenet_se": {
"name": "densenet",
"num_blocks": [1, 1, 1, 1],
"init_planes": 4,
"growth_rate": 32,
"expansion": 4,
"final_bn_relu": True,
"small_input": True,
"use_se": True,
"heads": [
{
"name": "fully_connected",
"unique_id": "default_head",
"num_classes": 1000,
"fork_block": "trunk_output",
"in_plane": 60,
"zero_init_bias": True,
}
],
},
}
def _find_block_full_path(model, block_name):
"""Find the full path for a given block name
e.g. block3-1 --> 3.block3-1
"""
for name, _ in model.named_modules():
if name.endswith(block_name):
return name
return None
class TestDensenet(unittest.TestCase):
def _test_model(self, model_config):
"""This test will build Densenet models, run a forward pass and
verify output shape, and then verify that get / set state
works.
I do this in one test so that we construct the model a minimum
number of times.
"""
model = build_model(model_config)
# Verify forward pass works
input = torch.ones([1, 3, 32, 32])
output = model.forward(input)
self.assertEqual(output.size(), (1, 1000))
# Verify get_set_state
new_model = build_model(model_config)
state = model.get_classy_state()
new_model.set_classy_state(state)
new_state = new_model.get_classy_state()
compare_model_state(self, state, new_state, check_heads=True)
def _test_quantize_model(self, model_config):
if get_torch_version() >= [1, 11]:
import torch.ao.quantization as tq
from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx
else:
import torch.quantization as tq
from torch.quantization.quantize_fx import convert_fx, prepare_fx
# quantize model
model = build_model(model_config)
model.eval()
input = torch.ones([1, 3, 32, 32])
heads = model.get_heads()
# since prepare_fx changes the code of ClassyBlock, we need to clear the heads first
# and reattach them later to avoid caching
model.clear_heads()
prepare_custom_config_dict = {}
head_path_from_blocks = [
_find_block_full_path(model.features, block_name)
for block_name in heads.keys()
]
# we need to keep the modules used in the heads standalone, since
# they are accessed directly by path name during execution
prepare_custom_config_dict["standalone_module_name"] = [
(
head,
{"": tq.default_qconfig},
{"input_quantized_idxs": [0], "output_quantized_idxs": []},
None,
)
for head in head_path_from_blocks
]
model.initial_block = prepare_fx(model.initial_block, {"": tq.default_qconfig})
model.features = prepare_fx(
model.features,
{"": tq.default_qconfig},
prepare_custom_config_dict,
)
model.set_heads(heads)
# calibration
model(input)
heads = model.get_heads()
model.clear_heads()
model.initial_block = convert_fx(model.initial_block)
model.features = convert_fx(model.features)
model.set_heads(heads)
output = model(input)
self.assertEqual(output.size(), (1, 1000))
def test_small_densenet(self):
self._test_model(MODELS["small_densenet"])
@unittest.skipIf(
get_torch_version() < [1, 8],
"FX Graph Modee Quantization is only availablee from 1.8",
)
def test_quantized_small_densenet(self):
self._test_quantize_model(MODELS["small_densenet"])
|
n=int(input('enter a number : '))
for i in range(n):
for j in range(n):
print(j+1,end=" ")
print()
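# Example: entering 3 prints a 3x3 block of column indices:
# 1 2 3
# 1 2 3
# 1 2 3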
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/3/4 15:03
# @Author : DaiPuWei
# E-Mail : 771830171@qq.com
# blog : https://blog.csdn.net/qq_30091945
# @Site : Civil Aviation University of China, North Teaching Building 25-506 Lab
# @File : WineQuality_Reduction.py
# @Software: PyCharm
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from PCA.PCA import PCA
from .Load_WineQuality import Load_WineQuality
def Merge(data,row,col):
"""
这是生成DataFrame数据的函数
:param data: 数据,格式为列表(list),不是numpy.array
:param row: 行名称
:param col: 列名称
"""
data = np.array(data).T
return pd.DataFrame(data=data,columns=col,index=row)
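# A minimal usage sketch of Merge (hypothetical values): each inner list becomes
# one column after the transpose inside Merge.
#
# df = Merge([[0.6, 0.3], [0.9, 0.6]],
#            row=["PC1", "PC2"],
#            col=["variance ratio", "cumulative ratio"])
# print(df)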
def run_main():
"""
这是主函数
"""
# Load the red wine quality dataset
red_winequality_path = "./winequality-red.csv"
Red_WineQuality_Data, _ = Load_WineQuality(red_winequality_path)
print(Red_WineQuality_Data)
print(np.shape(Red_WineQuality_Data))
# Load the white wine quality dataset
white_winequality_path = "./winequality-white.csv"
White_WineQuality_Data,__ = Load_WineQuality(white_winequality_path)
print(White_WineQuality_Data)
print(np.shape(White_WineQuality_Data))
# Fix garbled Chinese characters when plotting
mpl.rcParams['font.sans-serif'] = [u'simHei']
mpl.rcParams['axes.unicode_minus'] = False
# Visualize the raw data
feature_name = ["fixed acidity","volatile acidity","citric acid","residual sugar",
"chlorides","free sulfur dioxide","total sulfur dioxide",
"density","pH","sulphates","alcohol"]
# Visualization of the red wine dataset
for i in range(len(feature_name)-1):
for j in range(i+1,len(feature_name)):
plt.scatter(Red_WineQuality_Data[:, i], Red_WineQuality_Data[:, j],s=5)
plt.xlabel(feature_name[i])
plt.ylabel(feature_name[j])
plt.grid(True)
plt.savefig("./红葡萄酒数据集可视化/" + str(i) + "-" + str(j) + ".jpg", bbox_inches='tight')
plt.close()
# Visualization of the white wine dataset
for i in range(len(feature_name)-1):
for j in range(i+1,len(feature_name)):
plt.scatter(White_WineQuality_Data[:, i], White_WineQuality_Data[:, j],s=5)
plt.xlabel(feature_name[i])
plt.ylabel(feature_name[j])
plt.grid(True)
plt.savefig("./白葡萄酒数据集可视化/"+str(i)+"-"+str(j)+".jpg", bbox_inches='tight')
plt.close()
# Construct the PCA objects
red_pca = PCA(Red_WineQuality_Data,feature_name)
white_pca = PCA(White_WineQuality_Data,feature_name)
# Apply PCA to the datasets to get the variance percentages and cumulative variance percentages
red_var_percentage_pca,red_var_accumulation_percentage_pca,\
red_feature_name = red_pca.PCA_Reduction()
white_var_percentage_pca, white_var_accumulation_percentage_pca,\
white_feature_name = white_pca.PCA_Reduction()
# Visualize the variance percentages of the PCA-reduced red wine data
plt.plot(np.arange(11),red_var_percentage_pca,"bx-")
plt.xlabel("K")
plt.ylabel("方差所占比例")
plt.xticks(np.arange(0, 11), np.arange(1, 12))
plt.grid(True)
plt.savefig("./红葡萄酒数据属性方差百分比PCA.jpg", bbox_inches='tight')
plt.close()
# Visualize the cumulative variance percentages of the PCA-reduced red wine data
plt.plot(np.arange(11),red_var_accumulation_percentage_pca,"bx-")
plt.xlabel("K")
plt.ylabel("方差累积所占比例")
plt.xticks(np.arange(0, 11), np.arange(1, 12))
plt.grid(True)
plt.savefig("./红葡萄酒数据属性方差累积百分比PCA.jpg", bbox_inches='tight')
plt.close()
# Save the variance percentages and cumulative percentages of the PCA-reduced red wine attributes
data = [red_var_percentage_pca,red_var_accumulation_percentage_pca]
col = ["方差所占比例","方差累积所占比例"]
ans = Merge(data,red_feature_name,col)
ans.to_excel("./红葡萄酒数据属性方差累积百分比PCA.xlsx")
# Visualize the variance percentages of the PCA-reduced white wine data
plt.plot(np.arange(11),white_var_percentage_pca,"bx-")
plt.xlabel("K")
plt.ylabel("方差所占比例")
plt.xticks(np.arange(0, 11), np.arange(1, 12))
plt.grid(True)
plt.savefig("./白葡萄酒数据属性方差百分比PCA.jpg", bbox_inches='tight')
plt.close()
# Visualize the cumulative variance percentages of the PCA-reduced white wine data
plt.plot(np.arange(11),white_var_accumulation_percentage_pca,"bx-")
plt.xlabel("K")
plt.ylabel("方差累积所占比例")
plt.xticks(np.arange(0,11),np.arange(1,12))
plt.grid(True)
plt.savefig("./白葡萄酒数据属性方差累积百分比PCA.jpg", bbox_inches='tight')
plt.close()
# Save the variance percentages and cumulative percentages of the PCA-reduced white wine attributes
data = [white_var_percentage_pca,white_var_accumulation_percentage_pca]
col = ["方差所占比例","方差累积所占比例"]
ans = Merge(data,white_feature_name,col)
ans.to_excel("./白葡萄酒数据属性方差累积百分比PCA.xlsx")
# Visualize the PCA-reduced red wine dataset
size = 5
Red_WineQuality_Data_Reduction_PCA = red_pca.get_ReductionData(size)
for i in range(size-1):
for j in range(i+1,size):
plt.scatter(Red_WineQuality_Data_Reduction_PCA[:, i],
Red_WineQuality_Data_Reduction_PCA[:, j],s=5)
plt.xlabel("主成分"+str(i+1))
plt.ylabel("主成分"+str(j+1))
plt.grid(True)
plt.savefig("./红葡萄酒数据集降维可视化PCA/"+str(i)+"-"+str(j)+".jpg"
,bbox_inches='tight')
plt.close()
# Visualize the PCA-reduced white wine dataset
size = 6
White_WineQuality_Data_Reduction_PCA = white_pca.get_ReductionData(size)
for i in range(size-1):
for j in range(i+1,size):
plt.scatter(White_WineQuality_Data_Reduction_PCA[:, i],
White_WineQuality_Data_Reduction_PCA[:, j],s=5)
plt.xlabel("主成分"+str(i+1))
plt.ylabel("主成分"+str(j+1))
plt.grid(True)
plt.savefig("./白葡萄酒数据集降维可视化PCA/"+str(i)+"-"+str(j)+".jpg"
,bbox_inches='tight')
plt.close()
if __name__ == '__main__':
run_main()
|
""" test elstruct.writer
"""
from elstruct import writer
def test__programs():
""" test writer.programs
"""
assert set(writer.programs()) >= {
'cfour2', 'gaussian09', 'gaussian16', 'molpro2015', 'mrcc2018', 'psi4'}
def test__optimization_programs():
""" test writer.optimization_programs
"""
assert set(writer.optimization_programs()) >= {
'cfour2', 'gaussian09', 'gaussian16', 'molpro2015', 'mrcc2018', 'psi4'}
if __name__ == '__main__':
test__programs()
|
#coding:utf-8
a = open('subtopic.csv','r')
b = open('NoVec_but_subtopic.csv','r')
c = open('NoVec_and_bin.csv','r')
dict_topic_sub = {}#key=topic, value=set_of_subtopic
num=0
for i in a:
num+=1
if num>1:
LINE = i.rstrip().split(',')
topic = LINE[3]
sub = LINE[5]
if sub != '100':
dict_topic_sub.setdefault(topic,set()).add(int(sub))
dict_topic_maxsub = {}#key=topic, value=max_num_of_subtopic
for topic in dict_topic_sub:
if len(dict_topic_sub[topic]) >= 1:
maxsub_in_topic = max(list(dict_topic_sub[topic]))
#print(maxsub_in_topic)
dict_topic_maxsub[topic] = maxsub_in_topic
else:
dict_topic_maxsub[topic] = 0
d = open('NoVec_subtopic.csv','w')
pre_suggest = ''
for i in b:
LINE = i.rstrip().split(',')
ID = LINE[0]
URL = LINE[1]
suggest = LINE[2]
topic = LINE[3]
prob = LINE[4]
try:
maxsub = dict_topic_maxsub[topic]
except KeyError:
maxsub = 0
#print (maxsub)
sub = 0
if suggest != pre_suggest:
sub = maxsub + 1
dict_topic_maxsub[topic] = sub  # update this topic's max_num_of_subtopic
else:
sub = maxsub
d.write(ID+','+URL+','+suggest+','+topic+','+prob+','+str(sub)+'\n')
pre_suggest = suggest
d.close()
e = open('NoVec_bin.csv','w')
for i in c:
LINE = i.rstrip().split(',')
ID = LINE[0]
URL = LINE[1]
suggest = LINE[2]
topic = LINE[3]
prob = LINE[4]
sub = 100
e.write(ID+','+URL+','+suggest+','+topic+','+prob+','+str(sub)+'\n')
e.close()
a.close()
b.close()
c.close()
|
from typing import TypeVar, Union
from .base import Matcher
from .equal import equal_to
T = TypeVar("T")
def as_matcher(value: Union[Matcher[T], T]) -> Matcher[T]:
if isinstance(value, Matcher):
return value
else:
return equal_to(value)
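# A minimal usage sketch (hypothetical values): an existing Matcher is returned
# unchanged, while a plain value is wrapped with equal_to.
#
# as_matcher(42)            # -> equal_to(42)
# as_matcher(equal_to(42))  # -> the same matcher, passed through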
|
# -*- coding: utf-8 -*-
# Import modules
import sys
from PyQt5.QtWidgets import QMainWindow , QApplication
from PyQt5.QtCore import Qt
### Custom window class
class MyWindow( QMainWindow):
'''Custom window class'''
### Constructor
def __init__(self,parent=None):
'''Constructor'''
# Call the parent class constructor
super(MyWindow,self).__init__(parent)
# Set the window flags (frameless)
self.setWindowFlags( Qt.FramelessWindowHint)
# For visibility, set the window background color (using QSS)
self.setStyleSheet('''background-color:blue; ''')
### Overridden method
def showMaximized(self):
'''Maximize the window'''
# Get the desktop widget
desktop = QApplication.desktop()
# Get the available screen geometry
rect = desktop.availableGeometry()
# Set the window geometry
self.setGeometry(rect)
# Show the window
self.show()
### Main
if __name__ == "__main__":
'''Main entry point'''
# Declare variables
app = QApplication(sys.argv)
# Create the window
window = MyWindow()
# Show the window maximized
window.showMaximized()
# Run the application event loop
sys.exit(app.exec_())
|
from ..core.uuid import uuid_from_func
from ..core.files import load_object, save_object
from typing import Optional
from loguru import logger
from pathlib import Path
from ..core.paramconverter import ParameterConverter
def _fscache(func, directory='./.tmp', paramconverter: Optional[ParameterConverter] = None, json:bool = True):
def wrapper(*args, **kwargs):
filename = uuid_from_func(func, args, kwargs, paramconverter=paramconverter)
path = Path(directory)/filename
filetype = 'json' if json else 'pickle'
if path.exists():
try:
logger.debug(f"Loading cached method {filename}")
return load_object(path, type=filetype)
except Exception:
logger.error(f"Could not load cached method {filename} rerunning method")
rtn = func(*args, **kwargs)
logger.debug(f"Caching method {filename}")
save_object(rtn, path, type = filetype)
return rtn
return wrapper
def fscache(func=None, *args, **kwargs):
if func is None:
def wrapper(_func):
return _fscache(_func, *args, **kwargs)
return wrapper
return _fscache(func, *args, **kwargs)
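# A minimal usage sketch (hypothetical function and cache directory), assuming
# uuid_from_func derives a stable filename from the function and its arguments:
#
# @fscache(directory='./.tmp', json=True)
# def expensive_computation(x, y):
#     return {"sum": x + y}
#
# expensive_computation(1, 2)  # computed and written to ./.tmp
# expensive_computation(1, 2)  # served from the cache on the second call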
|
from microbit import *
import Robit_stepper
theBoard = Robit_stepper.Robit()
while True:
if button_a.is_pressed():
theBoard.StepperDegree(1, 500, "W")
elif button_b.is_pressed():
theBoard.StepperDegree(1, -500, "W")
else:
theBoard.MotorStopAll()
|
class Length:
__metric ={"mm": 0.001, "cm":0.01, "m":1, "km":1000, "in":0.0254, "ft":0.3048, "yd":0.9144, "mi":1609.344}
def __init__(self, value, unit="m"):
self.value = value
self.unit = unit
def conver2metre(self):
return self.value * Length.__metric[self.unit]
def __add__(self, other):
l = self.conver2metre() + other.conver2metre()
return Length(l/Length.__metric[self.unit],self.unit)
def __str__(self):
return str(self.conver2metre())
def __repr__(self):
return "Length (" + str(self.value)+",'"+self.unit +"')"
if __name__ == "__main__" :
x = Length(4)
print(x)
z = Length(4.5,"yd")+Length(1)
print(repr(z))
print(z)
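# With the __init__/__add__ fixes above, this prints (approximately):
#   4
#   Length (5.5936...,'yd')
#   5.1148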
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Component:
DESCRIPTION = "Get information about a user"
class Input:
ID = "id"
class Output:
_ID = "_id"
_TYPE = "_type"
CREATEDAT = "createdAt"
CREATEDBY = "createdBy"
HASKEY = "hasKey"
ID = "id"
NAME = "name"
PREFERENCES = "preferences"
ROLES = "roles"
STATUS = "status"
UPDATEDAT = "updatedAt"
UPDATEDBY = "updatedBy"
USER = "user"
class GetUserInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"id": {
"type": "string",
"title": "User ID",
"description": "User ID. If empty, the current user is used",
"order": 1
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class GetUserOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"_id": {
"type": "string",
"title": "_ID",
"description": "User _ID",
"order": 10
},
"_type": {
"type": "string",
"title": "Type",
"description": "User type",
"order": 2
},
"createdAt": {
"type": "integer",
"title": "Created At",
"description": "Time the user was created at in milliseconds or epoch, e.g. 1496561862924",
"order": 12
},
"createdBy": {
"type": "string",
"title": "Updated By",
"description": "Created by",
"order": 8
},
"hasKey": {
"type": "boolean",
"title": "HasKey",
"description": "User has a key",
"order": 3
},
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 11
},
"name": {
"type": "string",
"title": "Name",
"description": "Name",
"order": 4
},
"preferences": {
"type": "object",
"title": "Preferences",
"description": "User preferences",
"order": 13
},
"roles": {
"type": "array",
"title": "Roles",
"description": "Roles",
"items": {
"type": "string"
},
"order": 5
},
"status": {
"type": "string",
"title": "Status",
"description": "Get user status",
"order": 1
},
"updatedAt": {
"type": "integer",
"title": "Updated At",
"description": "Time the user was updated in milliseconds or epoch, e.g. 1496561862924",
"order": 6
},
"updatedBy": {
"type": "string",
"title": "Updated By",
"description": "Updated by",
"order": 9
},
"user": {
"type": "string",
"title": "User",
"description": "User",
"order": 7
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
|
import os
import onnx
import torch
from src.anti_spoof_predict import AntiSpoofPredict
import argparse
from onnxsim import simplify
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--weights_file', type=str, default='./resources/anti_spoof_models/2.7_80x80_MiniFASNetV2.pth',
help='weights file path')
parser.add_argument('--img_size', nargs='+', type=int, default=[80, 80], help='image size')
parser.add_argument('--batch_size', type=int, default=1, help='batch size')
opt = parser.parse_args()
model_test = AntiSpoofPredict(0)
model_test._load_model(opt.weights_file)
model_test.model = model_test.model.cpu()
model_test.model.eval()
if not os.path.exists('./resources/onnx'):
os.makedirs('./resources/onnx')
output_path = opt.weights_file.replace('anti_spoof_models', 'onnx').replace('pth', 'onnx')
input_shape = opt.img_size
dummy_input = torch.randn(opt.batch_size, 3, input_shape[0], input_shape[1])
torch.onnx.export(model_test.model, dummy_input, output_path, keep_initializers_as_inputs=True)
onnx_model = onnx.load(output_path) # load onnx model
model_simp, check = simplify(onnx_model)
assert check, "Simplified ONNX model could not be validated"
onnx.save(model_simp, output_path)
print('ONNX export success, saved as %s' % output_path)
|
# Think about the interface
# Design the interface
|
from testify import *
import datetime
import pytz
from dmc import (
Time,
TimeInterval,
TimeSpan,
TimeIterator,
TimeSpanIterator)
class InitTimeTestCase(TestCase):
def test_direct(self):
d = Time(2014, 4, 18, 17, 50, 21)
assert_equal(d.year, 2014)
assert_equal(d.month, 4)
assert_equal(d.day, 18)
assert_equal(d.hour, 17)
assert_equal(d.minute, 50)
assert_equal(d.second, 21)
def test_direct_tz(self):
d = Time(2014, 4, 18, 17, 50, 21, tz='US/Pacific')
assert_equal(d.year, 2014)
assert_equal(d.month, 4)
assert_equal(d.day, 19)
assert_equal(d.hour, 1)
assert_equal(d.minute, 50)
assert_equal(d.second, 21)
def test_direct_local(self):
d = Time(2014, 4, 18, 17, 50, 21, local=True)
assert_equal(d.year, 2014)
assert_equal(d.month, 4)
# can't really say for sure
assert d.day in [18, 19]
assert_equal(d.minute, 50)
assert_equal(d.second, 21)
def test_timestamp(self):
ts = 1398125982.036391
t = Time.from_timestamp(ts)
assert_equal(t.year, 2014)
assert_equal(t.month, 4)
assert_equal(t.day, 22)
assert_equal(t.hour, 0)
assert_equal(t.minute, 19)
assert_equal(t.second, 42)
assert_equal(t.microsecond, 36391)
def test_datetime_naive(self):
dt = datetime.datetime(2014, 4, 18, 17, 50, 21)
t = Time.from_datetime(dt)
assert_equal(t.day, 18)
assert_equal(t.hour, 17)
assert_equal(t.minute, 50)
assert_equal(t.second, 21)
def test_datetime_tz(self):
dt = datetime.datetime(2014, 4, 18, 17, 50, 21)
dt = pytz.timezone('US/Pacific').localize(dt)
t = Time.from_datetime(dt)
assert_equal(t.year, 2014)
assert_equal(t.month, 4)
assert_equal(t.day, 19)
assert_equal(t.hour, 0)
assert_equal(t.minute, 50)
assert_equal(t.second, 21)
def test_str(self):
t = Time.from_str("2014-04-18T17:50:21.036391")
assert_equal(t.year, 2014)
assert_equal(t.month, 4)
assert_equal(t.day, 18)
assert_equal(t.hour, 17)
assert_equal(t.minute, 50)
assert_equal(t.second, 21)
def test_str_tz(self):
t = Time.from_str("2014-04-18T17:50:21.036391-07:00")
assert_equal(t.year, 2014)
assert_equal(t.month, 4)
assert_equal(t.day, 19)
assert_equal(t.hour, 0)
assert_equal(t.minute, 50)
assert_equal(t.second, 21)
def test_str_specify_tz(self):
t = Time.from_str("2014-04-18T17:50:21.036391", tz='US/Pacific')
assert_equal(t.year, 2014)
assert_equal(t.month, 4)
assert_equal(t.day, 19)
assert_equal(t.hour, 0)
assert_equal(t.minute, 50)
assert_equal(t.second, 21)
class ConvertTimeTestCase(TestCase):
@setup
def create_time(self):
self.t = Time(2014, 4, 18, 17, 50, 21, 36391)
def test_str(self):
assert_equal(self.t.to_str(), "2014-04-18T17:50:21.036391+00:00")
def test_str_tz(self):
assert_equal(self.t.to_str(tz='US/Pacific'), "2014-04-18T10:50:21.036391-07:00")
def test_str_local(self):
# We don't really know
assert self.t.to_str(local=True)
def test_str_format(self):
assert_equal(self.t.to_str(format="%m/%d/%Y %H:%M"), "04/18/2014 17:50")
def test_timestamp(self):
assert_equal(self.t.to_timestamp(), 1397872221.036391)
def test_datetime(self):
dt = self.t.to_datetime()
assert_equal(dt.year, 2014)
assert_equal(dt.month, 4)
assert_equal(dt.day, 18)
assert_equal(dt.hour, 17)
assert_equal(dt.minute, 50)
assert_equal(dt.second, 21)
assert_equal(dt.tzinfo, pytz.UTC)
def test_datetime_tz(self):
dt = self.t.to_datetime(tz='US/Pacific')
assert_equal(dt.year, 2014)
assert_equal(dt.month, 4)
assert_equal(dt.day, 18)
assert_equal(dt.hour, 10)
assert_equal(dt.minute, 50)
assert_equal(dt.second, 21)
assert_equal(str(dt.tzinfo), 'US/Pacific')
def test_datetime_local(self):
dt = self.t.to_datetime(local=True)
assert_equal(dt.year, 2014)
assert_equal(dt.month, 4)
assert_equal(dt.minute, 50)
assert_equal(dt.second, 21)
def test_human(self):
# Just make sure it doesn't crash
assert self.t.to_human()
class ArithmeticTimeTest(TestCase):
def test_time_add(self):
t1 = Time(2014, 4, 18, 17, 50, 21)
ti = TimeInterval(2.22)
t2 = t1 + ti
assert_equal(t2.second, 23)
assert_equal(t2.microsecond, 220000)
def test_time_sub(self):
t1 = Time(2014, 4, 18, 17, 50, 21)
ti = TimeInterval(2.22)
t2 = t1 - ti
assert_equal(t2.second, 18)
assert_equal(t2.microsecond, 780000)
class InitTimeIntervalTest(TestCase):
def test_seconds(self):
i = TimeInterval(21)
assert_equal(i.seconds, 21)
assert_equal(i.microseconds, 0)
def test_float_seconds(self):
i = TimeInterval(1.2)
assert_equal(i.seconds, 1)
assert_equal(i.microseconds, 200000)
def test_minutes(self):
i = TimeInterval(minutes=2)
assert_equal(i.seconds, 120)
assert_equal(i.microseconds, 0)
def test_hours(self):
i = TimeInterval(hours=1)
assert_equal(i.seconds, 3600)
assert_equal(i.microseconds, 0)
def test_microseconds(self):
i = TimeInterval(microseconds=10)
assert_equal(i.seconds, 0)
assert_equal(i.microseconds, 10)
def test_microsecond_overflow(self):
i = TimeInterval(seconds=1.9, microseconds=200000)
assert_equal(i.seconds, 2)
assert_equal(i.microseconds, 100000)
def test_timedelta(self):
td = datetime.timedelta(days=1, seconds=10, microseconds=1000)
i = TimeInterval.from_timedelta(td)
assert_equal(i.seconds, round(td.total_seconds()))
assert_equal(i.microseconds, td.microseconds)
assert_equal(float(i), td.total_seconds())
class ConvertTimeIntervalTest(TestCase):
def test_int(self):
i = TimeInterval(4)
assert_equal(int(i), 4)
def test_int_round(self):
i = TimeInterval(4, microseconds=600000)
assert_equal(int(i), 5)
def test_float(self):
i = TimeInterval(4, microseconds=600000)
assert_equal(float(i), 4.6)
def test_str(self):
i = TimeInterval(hours=1, minutes=45, seconds=21, microseconds=600000)
assert_equal(str(i), "+01:45:21.6")
class ArithmeticTimeIntervalTest(TestCase):
def test_add(self):
i1 = TimeInterval(1)
i2 = TimeInterval(1)
i3 = i1 + i2
assert_equal(i3.seconds, 2)
def test_add_int(self):
i1 = TimeInterval(1)
i2 = 1
i3 = i1 + i2
assert_equal(i3.seconds, 2)
i4 = i2 + i1
assert_equal(i4.seconds, 2)
def test_sub(self):
i1 = TimeInterval(2)
i2 = TimeInterval(1)
i3 = i1 - i2
assert_equal(i3.seconds, 1)
def test_sub_neg(self):
i1 = TimeInterval(1)
i2 = TimeInterval(2)
i3 = i1 - i2
assert_equal(i3.seconds, -1)
def test_sub_int(self):
i1 = TimeInterval(2)
i2 = 1
i3 = i1 - i2
assert_equal(i3.seconds, 1)
i4 = i2 - i1
assert_equal(i4, -1)
def test_mul(self):
i1 = TimeInterval(2)
i2 = i1 * 3
assert_equal(int(i2), 6)
def test_div(self):
i1 = TimeInterval(5)
i2 = i1 / 2
assert_equal(float(i2), 2.5)
def test_div_micro(self):
i1 = TimeInterval(2, microseconds=22)
i2 = i1 / 2
assert_equal(i2.seconds, 1)
assert_equal(i2.microseconds, 11)
def test_abs(self):
i1 = TimeInterval(-2.22)
i2 = abs(i1)
assert_equal(float(i2), 2.22)
def test_abs_positive(self):
i1 = TimeInterval(2.22)
i2 = abs(i1)
assert_equal(float(i2), 2.22)
def test_cmp(self):
assert_equal(TimeInterval(2.22), TimeInterval(2, microseconds=220000))
assert_gt(TimeInterval(2.22), TimeInterval(2.20))
assert_gt(TimeInterval(3.22), TimeInterval(2.5))
assert_lt(TimeInterval(0), TimeInterval(microseconds=1))
assert_lt(TimeInterval(-3), TimeInterval(2.5))
class TimeSpanTest(TestCase):
def test_iter(self):
t1 = Time.now()
t2 = Time.now() + 30
ts = TimeSpan(t1, t2)
start_t, end_t = ts
assert_equal(start_t, t1)
assert_equal(end_t, t2)
def test_get(self):
t1 = Time.now()
t2 = Time.now() + 30
ts = TimeSpan(t1, t2)
assert_equal(ts[0], t1)
assert_equal(ts[1], t2)
class TimeIteratorTest(TestCase):
def test(self):
start_t = Time.now()
end_t = start_t + 5*60
times = list(TimeIterator(TimeSpan(start_t, end_t), TimeInterval(60)))
assert_equal(len(times), 6)
assert_equal(times[0], start_t)
assert_equal(times[-1], end_t)
class TimeSpanIteratorTest(TestCase):
def test(self):
start_t = Time.now()
end_t = start_t + 5*60
times = list(TimeSpanIterator(TimeSpan(start_t, end_t), TimeInterval(60)))
assert_equal(len(times), 5)
assert_equal(times[0].start, start_t)
assert_equal(times[-1].end, end_t)
|
import torch
from torch import nn
class ConvEncoder(nn.Module):
def __init__(self, num_channels, ngf=512, **kwargs):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(num_channels, ngf // 8, 3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(ngf // 8),
nn.LeakyReLU(),
nn.Conv2d(ngf // 8, ngf // 4, 3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(ngf // 4),
nn.LeakyReLU(),
nn.Conv2d(ngf // 4, ngf // 2, 3, stride=4, padding=1, bias=False),
nn.BatchNorm2d(ngf // 2),
nn.LeakyReLU(),
nn.Conv2d(ngf // 2, ngf, 3, stride=4, padding=1),
nn.BatchNorm2d(ngf),
nn.LeakyReLU(),
)
def forward(self, x):
z = self.net(x)
z = z.reshape(z.shape[0], -1)
return z
class ConvDecoder(nn.Module):
def __init__(self, nz=512, nc=6, ngf=512, **kwargs):
super().__init__()
self.net = nn.Sequential(
nn.UpsamplingNearest2d(scale_factor=2),
nn.ConvTranspose2d(nz, ngf // 2, 2, stride=2, padding=0, bias=False),
nn.BatchNorm2d(ngf // 2),
nn.LeakyReLU(),
nn.UpsamplingNearest2d(scale_factor=2),
nn.ConvTranspose2d(ngf // 2, ngf // 4, 2, stride=2, padding=0, bias=False),
nn.BatchNorm2d(ngf // 4),
nn.LeakyReLU(),
nn.ConvTranspose2d(ngf // 4, ngf // 8, 2, stride=2, padding=0, bias=False),
nn.BatchNorm2d(ngf // 8),
nn.LeakyReLU(),
nn.ConvTranspose2d(ngf // 8, nc, 2, stride=2, padding=0),
)
def forward(self, z):
z = z.reshape(z.shape[0], -1, 1, 1)
# Output has six channels
output = self.net(z)
# Scales are post-processed to [0,inf] outside this method
means = torch.sigmoid(output[:, 0:3, ...])
scales = output[:, 3:6, ...]
return means, scales
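# A minimal shape-check sketch (assuming 64x64 inputs, which the encoder's
# 2-2-4-4 stride pattern reduces to a 1x1 spatial map):
#
# enc = ConvEncoder(num_channels=3, ngf=512)
# dec = ConvDecoder(nz=512, nc=6, ngf=512)
# x = torch.randn(4, 3, 64, 64)
# z = enc(x)               # shape (4, 512)
# means, scales = dec(z)   # each of shape (4, 3, 64, 64)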
class ConvEncoder2(nn.Module):
""" For 28x28 images """
def __init__(self, num_channels, h_dim, ngf, bias=False, **kwargs):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(num_channels, ngf, kernel_size=2, stride=2, bias=bias),
nn.BatchNorm2d(ngf),
nn.ReLU(True),
nn.Conv2d(ngf, 2 * ngf, kernel_size=2, stride=2, bias=bias),
nn.BatchNorm2d(2 * ngf),
nn.ReLU(True),
nn.Conv2d(2 * ngf, 4 * ngf, kernel_size=2, stride=2, bias=bias),
nn.BatchNorm2d(4 * ngf),
nn.ReLU(True),
nn.Conv2d(4 * ngf, h_dim, kernel_size=2, stride=2),
)
def forward(self, x):
z = self.net(x)
z = z.reshape(z.shape[0], -1)
return z
|
from pytorch_trainer.training import extensions # NOQA
from pytorch_trainer.training import triggers # NOQA
from pytorch_trainer.training import updaters # NOQA
from pytorch_trainer.training import util # NOQA
# import classes and functions
from pytorch_trainer.training.extension import Extension # NOQA
from pytorch_trainer.training.extension import make_extension # NOQA
from pytorch_trainer.training.extension import PRIORITY_EDITOR # NOQA
from pytorch_trainer.training.extension import PRIORITY_READER # NOQA
from pytorch_trainer.training.extension import PRIORITY_WRITER # NOQA
from pytorch_trainer.training.trainer import Trainer # NOQA
from pytorch_trainer.training.trigger import get_trigger # NOQA
from pytorch_trainer.training.trigger import IntervalTrigger # NOQA
from pytorch_trainer.training.updater import StandardUpdater # NOQA
from pytorch_trainer.training.updater import Updater # NOQA
|
"""This module tests the ConstantUnitaryGate class."""
from __future__ import annotations
from hypothesis import given
from bqskit.ir.gates import ConstantUnitaryGate
from bqskit.qis.unitary.unitarymatrix import UnitaryMatrix
from bqskit.utils.test.strategies import unitaries
@given(unitaries())
def test_constant_unitary(utry: UnitaryMatrix) -> None:
u = ConstantUnitaryGate(utry)
assert u.num_qudits == utry.num_qudits
assert u.radixes == utry.radixes
assert u.num_params == 0
assert u.get_unitary() == u
@given(unitaries())
def test_constant_unitary_like(utry: UnitaryMatrix) -> None:
u = ConstantUnitaryGate(utry.numpy, utry.radixes)
assert u.num_qudits == utry.num_qudits
assert u.radixes == utry.radixes
assert u.num_params == 0
assert u.get_unitary() == u
|
from numpy import inf, nan
from sklearn.linear_model import ElasticNetCV as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class ElasticNetCVImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for ElasticNetCV Elastic Net model with iterative fitting along a regularization path.",
"allOf": [
{
"type": "object",
"required": [
"l1_ratio",
"eps",
"n_alphas",
"alphas",
"fit_intercept",
"normalize",
"precompute",
"max_iter",
"tol",
"cv",
"copy_X",
"verbose",
"n_jobs",
"positive",
"random_state",
"selection",
],
"relevantToOptimizer": [
"eps",
"n_alphas",
"fit_intercept",
"normalize",
"precompute",
"max_iter",
"tol",
"cv",
"copy_X",
"positive",
"selection",
],
"additionalProperties": False,
"properties": {
"l1_ratio": {
"XXX TODO XXX": "float or array of floats, optional",
"description": "float between 0 and 1 passed to ElasticNet (scaling between l1 and l2 penalties)",
"type": "number",
"default": 0.5,
},
"eps": {
"type": "number",
"minimumForOptimizer": 0.001,
"maximumForOptimizer": 0.1,
"distribution": "loguniform",
"default": 0.001,
"description": "Length of the path",
},
"n_alphas": {
"type": "integer",
"minimumForOptimizer": 100,
"maximumForOptimizer": 101,
"distribution": "uniform",
"default": 100,
"description": "Number of alphas along the regularization path, used for each l1_ratio.",
},
"alphas": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "numpy array, optional",
},
{"enum": [None]},
],
"default": None,
"description": "List of alphas where to compute the models",
},
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "whether to calculate the intercept for this model",
},
"normalize": {
"type": "boolean",
"default": False,
"description": "This parameter is ignored when ``fit_intercept`` is set to False",
},
"precompute": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "True | False | 'auto' | array-like",
"forOptimizer": False,
},
{"enum": ["auto"]},
],
"default": "auto",
"description": "Whether to use a precomputed Gram matrix to speed up calculations",
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 1000,
"description": "The maximum number of iterations",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"distribution": "loguniform",
"default": 0.0001,
"description": "The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``.",
},
"cv": {
"XXX TODO XXX": "int, cross-validation generator or an iterable, optional",
"description": "Determines the cross-validation splitting strategy",
"type": "integer",
"minimumForOptimizer": 3,
"maximumForOptimizer": 4,
"distribution": "uniform",
"default": 3,
},
"copy_X": {
"type": "boolean",
"default": True,
"description": "If ``True``, X will be copied; else, it may be overwritten.",
},
"verbose": {
"anyOf": [{"type": "boolean"}, {"type": "integer"}],
"default": 0,
"description": "Amount of verbosity.",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"description": "Number of CPUs to use during the cross validation",
},
"positive": {
"type": "boolean",
"default": False,
"description": "When set to ``True``, forces the coefficients to be positive.",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "The seed of the pseudo random number generator that selects a random feature to update",
},
"selection": {
"enum": ["random", "cyclic"],
"default": "cyclic",
"description": "If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default",
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit linear model with coordinate descent",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"laleType": "Any",
"XXX TODO XXX": "{array-like}, shape (n_samples, n_features)",
"description": "Training data",
},
"y": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Target values",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the linear model",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns predicted values.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.ElasticNetCV#sklearn-linear_model-elasticnetcv",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
set_docstrings(ElasticNetCVImpl, _combined_schemas)
ElasticNetCV = make_operator(ElasticNetCVImpl, _combined_schemas)
|
from flask import Flask, render_template
from flask import request, abort, url_for
import json
import html
from json2html import *
app = Flask(__name__)
# read file
with open('./data/products.json', 'r') as myfile:
pro_data = myfile.read()
@app.route("/products")
def products():
"""Products page for web app"""
pro_table = (json2html.convert(json = pro_data))
return render_template("products.html", table_data=pro_table)
# return render_template(
# "products.html", title="page", jsonfile=json.dumps(data))
if __name__=="__main__":
app.run(host='127.0.0.1', port=5001, debug=True)
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
from .calculate.action import Calculate
from .max.action import Max
|
CHAINS = [
{
"chain_code": "btc",
"shortname": "BTC",
"fee_coin": "btc",
"name": "Bitcoin",
"chain_model": "utxo",
"curve": "secp256k1",
"chain_affinity": "btc",
"qr_code_prefix": "bitcoin",
"bip44_coin_type": 0,
"bip44_last_hardened_level": "ACCOUNT",
"bip44_auto_increment_level": "ACCOUNT",
"bip44_target_level": "ADDRESS_INDEX",
"default_address_encoding": "P2WPKH-P2SH",
"chain_id": None,
"bip44_purpose_options": {"P2PKH": 44, "P2WPKH-P2SH": 49, "P2WPKH": 84},
"fee_price_decimals_for_legibility": 0,
"nonce_supported": False,
"dust_threshold": 546,
"blocktime_seconds": 600,
"coins": [
{
"code": "btc",
"symbol": "BTC",
"name": "Bitcoin",
"decimals": 8,
"icon": "https://onekey.243096.com/onekey/images/token/btc/BTC.png",
}
],
"clients": [
{"class": "BlockBook", "url": "https://btc1.trezor.io"},
{"class": "BlockBook", "url": "https://btc2.trezor.io"},
{"class": "BlockBook", "url": "https://btc3.trezor.io"},
{"class": "BlockBook", "url": "https://btc4.trezor.io"},
{"class": "BlockBook", "url": "https://btc5.trezor.io"},
],
},
{
"chain_code": "bch",
"shortname": "BCH",
"fee_coin": "bch",
"name": "Bcash",
"chain_model": "utxo",
"curve": "secp256k1",
"chain_affinity": "bch",
"qr_code_prefix": "BitcoinCash",
"bip44_coin_type": 145,
"bip44_last_hardened_level": "ACCOUNT",
"bip44_auto_increment_level": "ACCOUNT",
"bip44_target_level": "ADDRESS_INDEX",
"default_address_encoding": "P2PKH",
"bip44_purpose_options": {},
"chain_id": None,
"fee_price_decimals_for_legibility": 0,
"nonce_supported": False,
"dust_threshold": 546,
"blocktime_seconds": 600,
"coins": [
{
"code": "bch",
"symbol": "BCH",
"name": "Bitcoin Cash",
"decimals": 8,
"icon": "https://onekey.243096.com/onekey/images/token/btc/BTC.png",
}
],
"clients": [
{"class": "BlockBook", "url": "https://bch1.trezor.io"},
{"class": "BlockBook", "url": "https://bch2.trezor.io"},
{"class": "BlockBook", "url": "https://bch3.trezor.io"},
{"class": "BlockBook", "url": "https://bch4.trezor.io"},
{"class": "BlockBook", "url": "https://bch5.trezor.io"},
],
},
{
"chain_code": "ltc",
"shortname": "LTC",
"fee_coin": "ltc",
"name": "Litecoin",
"chain_model": "utxo",
"curve": "secp256k1",
"chain_affinity": "btc",
"qr_code_prefix": "Litecoin",
"bip44_coin_type": 2,
"bip44_last_hardened_level": "ACCOUNT",
"bip44_auto_increment_level": "ACCOUNT",
"bip44_target_level": "ADDRESS_INDEX",
"default_address_encoding": "P2WPKH-P2SH",
"chain_id": None,
"bip44_purpose_options": {"P2PKH": 44, "P2WPKH-P2SH": 49, "P2WPKH": 84},
"fee_price_decimals_for_legibility": 0,
"nonce_supported": False,
"dust_threshold": 546,
"blocktime_seconds": 150,
"coins": [
{
"code": "ltc",
"symbol": "LTC",
"name": "Litecoin",
"decimals": 8,
"icon": "http s://onekey.243096.com/onekey/images/token/btc/BTC.png",
}
],
"clients": [
{"class": "BlockBook", "url": "https://ltc1.trezor.io"},
{"class": "BlockBook", "url": "https://ltc2.trezor.io"},
{"class": "BlockBook", "url": "https://ltc3.trezor.io"},
{"class": "BlockBook", "url": "https://ltc4.trezor.io"},
{"class": "BlockBook", "url": "https://ltc5.trezor.io"},
],
},
{
"chain_code": "doge",
"shortname": "DOGE",
"fee_coin": "doge",
"name": "Dogecoin",
"chain_model": "utxo",
"curve": "secp256k1",
"chain_affinity": "btc",
"qr_code_prefix": "Dogecoin",
"bip44_coin_type": 3,
"bip44_last_hardened_level": "ACCOUNT",
"bip44_auto_increment_level": "ACCOUNT",
"bip44_target_level": "ADDRESS_INDEX",
"default_address_encoding": "P2PKH",
"bip44_purpose_options": {},
"chain_id": None,
"fee_price_decimals_for_legibility": 0,
"nonce_supported": False,
"dust_threshold": 99999999,
"blocktime_seconds": 60,
"coins": [
{
"code": "doge",
"symbol": "Doge",
"name": "Dogecoin",
"decimals": 8,
"icon": "https://onekey.243096.com/onekey/images/token/btc/BTC.png",
}
],
"clients": [
{"class": "BlockBook", "url": "https://doge1.trezor.io"},
{"class": "BlockBook", "url": "https://doge2.trezor.io"},
{"class": "BlockBook", "url": "https://doge3.trezor.io"},
{"class": "BlockBook", "url": "https://doge4.trezor.io"},
{"class": "BlockBook", "url": "https://doge5.trezor.io"},
],
},
{
"chain_code": "dash",
"shortname": "DASH",
"fee_coin": "dash",
"name": "Dash",
"chain_model": "utxo",
"curve": "secp256k1",
"chain_affinity": "btc",
"qr_code_prefix": "Dash",
"bip44_coin_type": 5,
"bip44_last_hardened_level": "ACCOUNT",
"bip44_auto_increment_level": "ACCOUNT",
"bip44_target_level": "ADDRESS_INDEX",
"default_address_encoding": "P2PKH",
"bip44_purpose_options": {},
"chain_id": None,
"fee_price_decimals_for_legibility": 0,
"nonce_supported": False,
"dust_threshold": 5460,
"blocktime_seconds": 150,
"coins": [
{
"code": "dash",
"symbol": "DASH",
"name": "Dash",
"decimals": 8,
"icon": "https://onekey.243096.com/onekey/images/token/btc/BTC.png",
}
],
"clients": [
{"class": "BlockBook", "url": "https://dash1.trezor.io"},
{"class": "BlockBook", "url": "https://dash2.trezor.io"},
{"class": "BlockBook", "url": "https://dash3.trezor.io"},
{"class": "BlockBook", "url": "https://dash4.trezor.io"},
{"class": "BlockBook", "url": "https://dash5.trezor.io"},
],
},
{
"chain_code": "zec",
"shortname": "ZEC",
"fee_coin": "zec",
"name": "Zcash",
"chain_model": "utxo",
"curve": "secp256k1",
"chain_affinity": "btc",
"qr_code_prefix": "Zcash",
"bip44_coin_type": 133,
"bip44_last_hardened_level": "ACCOUNT",
"bip44_auto_increment_level": "ACCOUNT",
"bip44_target_level": "ADDRESS_INDEX",
"default_address_encoding": "P2PKH",
"bip44_purpose_options": {},
"chain_id": None,
"fee_price_decimals_for_legibility": 0,
"nonce_supported": False,
"dust_threshold": 546,
"blocktime_seconds": 150,
"coins": [
{
"code": "zec",
"symbol": "ZEC",
"name": "Zcash",
"decimals": 8,
"icon": "https://onekey.243096.com/onekey/images/token/btc/BTC.png",
}
],
"clients": [
{"class": "BlockBook", "url": "https://zec1.trezor.io"},
{"class": "BlockBook", "url": "https://zec2.trezor.io"},
{"class": "BlockBook", "url": "https://zec3.trezor.io"},
{"class": "BlockBook", "url": "https://zec4.trezor.io"},
{"class": "BlockBook", "url": "https://zec5.trezor.io"},
],
},
{
"chain_code": "btg",
"shortname": "BTG",
"fee_coin": "btg",
"name": "Bgold",
"chain_model": "utxo",
"curve": "secp256k1",
"chain_affinity": "btc",
"qr_code_prefix": "BitcoinGold",
"bip44_coin_type": 156,
"bip44_last_hardened_level": "ACCOUNT",
"bip44_auto_increment_level": "ACCOUNT",
"bip44_target_level": "ADDRESS_INDEX",
"default_address_encoding": "P2WPKH-P2SH",
"chain_id": None,
"bip44_purpose_options": {"P2PKH": 44, "P2WPKH-P2SH": 49, "P2WPKH": 84},
"fee_price_decimals_for_legibility": 0,
"nonce_supported": False,
"dust_threshold": 546,
"blocktime_seconds": 600,
"coins": [
{
"code": "btg",
"symbol": "BTG",
"name": "Bitcoin Gold",
"decimals": 8,
"icon": "https://onekey.243096.com/onekey/images/token/btc/BTC.png",
}
],
"clients": [
{"class": "BlockBook", "url": "https://btg1.trezor.io"},
{"class": "BlockBook", "url": "https://btg2.trezor.io"},
{"class": "BlockBook", "url": "https://btg3.trezor.io"},
{"class": "BlockBook", "url": "https://btg4.trezor.io"},
{"class": "BlockBook", "url": "https://btg5.trezor.io"},
],
},
{
"chain_code": "dgb",
"shortname": "DGB",
"fee_coin": "dgb",
"name": "DigiByte",
"chain_model": "utxo",
"curve": "secp256k1",
"chain_affinity": "btc",
"qr_code_prefix": "DigiByte",
"bip44_coin_type": 20,
"bip44_last_hardened_level": "ACCOUNT",
"bip44_auto_increment_level": "ACCOUNT",
"bip44_target_level": "ADDRESS_INDEX",
"default_address_encoding": "P2WPKH-P2SH",
"chain_id": None,
"bip44_purpose_options": {"P2PKH": 44, "P2WPKH-P2SH": 49, "P2WPKH": 84},
"fee_price_decimals_for_legibility": 0,
"nonce_supported": False,
"dust_threshold": 546,
"blocktime_seconds": 15,
"coins": [
{
"code": "dgb",
"symbol": "DGB",
"name": "DigiByte",
"decimals": 8,
"icon": "https://onekey.243096.com/onekey/images/token/btc/BTC.png",
}
],
"clients": [
{"class": "BlockBook", "url": "https://dgb1.trezor.io"},
{"class": "BlockBook", "url": "https://dgb2.trezor.io"},
],
},
{
"chain_code": "nmc",
"shortname": "NMC",
"fee_coin": "nmc",
"name": "Namecoin",
"chain_model": "utxo",
"curve": "secp256k1",
"chain_affinity": "btc",
"qr_code_prefix": "Namecoin",
"bip44_coin_type": 7,
"bip44_last_hardened_level": "ACCOUNT",
"bip44_auto_increment_level": "ACCOUNT",
"bip44_target_level": "ADDRESS_INDEX",
"default_address_encoding": "P2PKH",
"bip44_purpose_options": {},
"chain_id": None,
"fee_price_decimals_for_legibility": 0,
"nonce_supported": False,
"dust_threshold": 546,
"blocktime_seconds": 600,
"coins": [
{
"code": "nmc",
"symbol": "NMC",
"name": "Namecoin",
"decimals": 8,
"icon": "https://onekey.243096.com/onekey/images/token/btc/BTC.png",
}
],
"clients": [
{"class": "BlockBook", "url": "https://nmc1.trezor.io"},
{"class": "BlockBook", "url": "https://nmc2.trezor.io"},
],
},
{
"chain_code": "vtc",
"shortname": "VTC",
"fee_coin": "vtc",
"name": "Vertcoin",
"chain_model": "utxo",
"curve": "secp256k1",
"chain_affinity": "btc",
"qr_code_prefix": "Vertcoin",
"bip44_coin_type": 28,
"bip44_last_hardened_level": "ACCOUNT",
"bip44_auto_increment_level": "ACCOUNT",
"bip44_target_level": "ADDRESS_INDEX",
"default_address_encoding": "P2WPKH-P2SH",
"chain_id": None,
"bip44_purpose_options": {"P2PKH": 44, "P2WPKH-P2SH": 49, "P2WPKH": 84},
"fee_price_decimals_for_legibility": 0,
"nonce_supported": False,
"dust_threshold": 54600,
"blocktime_seconds": 150,
"coins": [
{
"code": "vtc",
"symbol": "VTC",
"name": "Vertcoin",
"decimals": 8,
"icon": "https://onekey.243096.com/onekey/images/token/btc/BTC.png",
}
],
"clients": [
{"class": "BlockBook", "url": "https://vtc1.trezor.io"},
{"class": "BlockBook", "url": "https://vtc2.trezor.io"},
{"class": "BlockBook", "url": "https://vtc3.trezor.io"},
{"class": "BlockBook", "url": "https://vtc4.trezor.io"},
{"class": "BlockBook", "url": "https://vtc5.trezor.io"},
],
},
{
"chain_code": "eth",
"shortname": "ETH",
"fee_coin": "eth",
"name": "Ethereum",
"chain_model": "account",
"curve": "secp256k1",
"chain_affinity": "eth",
"qr_code_prefix": "ethereum",
"bip44_coin_type": 60,
"bip44_last_hardened_level": "ACCOUNT",
"bip44_auto_increment_level": "ADDRESS_INDEX",
"bip44_target_level": "ADDRESS_INDEX",
"default_address_encoding": None,
"chain_id": "1",
"bip44_purpose_options": {},
"fee_price_decimals_for_legibility": 9,
"nonce_supported": True,
"coins": [
{
"code": "eth",
"symbol": "ETH",
"name": "Ethereum",
"decimals": 18,
"icon": "https://onekey.243096.com/onekey/images/token/eth/ETH.png",
}
],
"clients": [
{"class": "Geth", "url": "https://rpc.blkdb.cn/eth"},
{"class": "Geth", "url": "https://eth1.onekey.so/rpc"},
{"class": "BlockBook", "url": "https://eth1.onekey.so"},
{"class": "BlockBook", "url": "https://eth1.trezor.io"},
{
"class": "Etherscan",
"url": "https://api-cn.etherscan.com",
"api_keys": ["R796P9T31MEA24P8FNDZBCA88UHW8YCNVW"],
},
{
"class": "Etherscan",
"url": "https://api.etherscan.io",
"api_keys": ["R796P9T31MEA24P8FNDZBCA88UHW8YCNVW"],
},
],
},
{
"chain_code": "bsc",
"shortname": "BSC",
"fee_coin": "bsc",
"name": "Binance Smart Chain",
"chain_model": "account",
"curve": "secp256k1",
"chain_affinity": "eth",
"qr_code_prefix": "bsc",
"bip44_coin_type": 60,
"bip44_last_hardened_level": "ACCOUNT",
"bip44_auto_increment_level": "ADDRESS_INDEX",
"bip44_target_level": "ADDRESS_INDEX",
"default_address_encoding": None,
"chain_id": "56",
"bip44_purpose_options": {},
"fee_price_decimals_for_legibility": 9,
"nonce_supported": True,
"coins": [
{
"code": "bsc",
"symbol": "BNB",
"name": "Binance Smart Chain",
"decimals": 18,
"icon": "https://onekey.243096.com/onekey/images/token/bsc/BSC.png",
}
],
"clients": [
{"class": "Geth", "url": "https://rpc.blkdb.cn/bsc"},
{"class": "Geth", "url": "https://bsc1.onekey.so/rpc"},
{"class": "Geth", "url": "https://bsc-dataseed1.binance.org"},
{"class": "BlockBook", "url": "https://bsc1.onekey.so"},
{
"class": "Etherscan",
"url": "https://api.bscscan.com",
"api_keys": ["VNC1NCG47T8X1Y5HZ3NSKQX2USKVWR4ZUK"],
},
],
},
{
"chain_code": "heco",
"shortname": "HECO",
"fee_coin": "heco",
"name": "Huobi ECO Chain",
"chain_model": "account",
"curve": "secp256k1",
"chain_affinity": "eth",
"qr_code_prefix": "heco",
"bip44_coin_type": 60,
"bip44_last_hardened_level": "ACCOUNT",
"bip44_auto_increment_level": "ADDRESS_INDEX",
"bip44_target_level": "ADDRESS_INDEX",
"default_address_encoding": None,
"chain_id": "128",
"bip44_purpose_options": {},
"fee_price_decimals_for_legibility": 9,
"nonce_supported": True,
"coins": [
{
"code": "heco",
"symbol": "HT",
"name": "Huobi ECO Chain",
"decimals": 18,
"icon": "https://onekey.243096.com/onekey/images/token/heco/HECO.png",
}
],
"clients": [
{"class": "Geth", "url": "https://rpc.blkdb.cn/heco"},
{"class": "Geth", "url": "https://heco1.onekey.so/rpc"},
{"class": "Geth", "url": "https://http-mainnet-node.huobichain.com"},
{"class": "BlockBook", "url": "https://heco1.onekey.so"},
{
"class": "Etherscan",
"url": "https://api.hecoinfo.com",
"api_keys": ["HDZRTIMGS873R25FCA8M7U96IIHJNNIBJD"],
},
],
},
{
"chain_code": "okt",
"shortname": "OEC",
"fee_coin": "okt",
"name": "OKExChain Mainnet",
"chain_model": "account",
"curve": "secp256k1",
"chain_affinity": "eth",
"qr_code_prefix": "okt",
"bip44_coin_type": 60,
"bip44_last_hardened_level": "ACCOUNT",
"bip44_auto_increment_level": "ADDRESS_INDEX",
"bip44_target_level": "ADDRESS_INDEX",
"default_address_encoding": None,
"chain_id": "66",
"bip44_purpose_options": {},
"fee_price_decimals_for_legibility": 9,
"nonce_supported": True,
"coins": [
{
"code": "okt",
"symbol": "OKT",
"name": "OKExChain Mainnet",
"decimals": 18,
"icon": "https://onekey.243096.com/onekey/images/token/okt/OKT.png",
}
],
"clients": [{"class": "Geth", "url": "https://exchainrpc.okex.org"}],
},
]
|
from iotempower import *
# general init
w = wifi("ehdemo-iotempire", "internetofthings", reset=False)
mqtt("192.168.10.254", "testfloor/devkit1-01", user="homeassistant", password="internetofthings",
client_id="devkit1-01")
# devkit1 button/led shield
button("lower", d0, "depressed", "pressed")
button("left", d3)
button("right", d6)
out("blue", onboardled, "off", "on")
out("red", d7)
out("yellow", d8)
# devkit1 lcd screen
d = display("dp1", d2, d5)
d.println("Current IP: ")
d.println(wip)
# devkit1 humidity temperature sensor
ht("ht1", d1)
# transmit loop, transmit all values every 5ms
transmit(5)
|
import torch
from torchvision import transforms, datasets
from torchvision.datasets.folder import find_classes
from .sampler import BalancedSubsetRandomSampler, RandomSampler
def folder(root, num_workers=2, batch_size=64, img_size=224, sample_per_class=-1, data_augmentation=False):
base_transform = transforms.Compose([
transforms.Resize((img_size, img_size)),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
if data_augmentation:
data_transform = transforms.Compose([
transforms.RandomResizedCrop(img_size, scale=(0.25, 1)),
transforms.RandomHorizontalFlip(),
base_transform
])
else:
data_transform = base_transform
data = datasets.ImageFolder(root=root, transform=data_transform)
if sample_per_class == -1:
data_sampler = RandomSampler(data)
else:
data_sampler = BalancedSubsetRandomSampler(data, sample_per_class, len(find_classes(root)[0]))
data_loader = torch.utils.data.DataLoader(data,
batch_size=batch_size, sampler=data_sampler, num_workers=num_workers)
return data_loader
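# A minimal usage sketch (hypothetical dataset path), assuming an ImageFolder-style
# directory with one subfolder per class:
#
# loader = folder('/path/to/imagefolder', batch_size=32, img_size=224,
#                 sample_per_class=100, data_augmentation=True)
# images, labels = next(iter(loader))  # images: (32, 3, 224, 224)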
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.http import HttpResponse
from django.template.loader import render_to_string
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.svm import SVC
from sklearn import neighbors
from sklearn.tree import DecisionTreeClassifier
from sklearn import tree
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
import math
def truncate(number, digits) -> float:
stepper = 10.0 ** digits
return math.trunc(stepper * number) / stepper
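# A quick worked example: truncate(97.56789, 4) == 97.5678 (truncates, does not round).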
def svm(request, kind):
m_area = request.POST.get('m-area')
m_compactness = request.POST.get('m-compactness')
m_concavePtr = request.POST.get('m-concavePtr')
m_concavity = request.POST.get('m-concavity')
m_fractalDimension = request.POST.get('m-fractalDimension')
m_perimeter = request.POST.get('m-perimeter')
m_radius = request.POST.get('m-radius')
m_smoothness = request.POST.get('m-smoothness')
m_symmetry = request.POST.get('m-symmetry')
m_texture = request.POST.get('m-texture')
se_area = request.POST.get('se-area')
se_compactness = request.POST.get('se-compactness')
se_concavePtr = request.POST.get('se-concavePtr')
se_concavity = request.POST.get('se-concavity')
se_fractalDimension = request.POST.get('se-fractalDimension')
se_perimeter = request.POST.get('se-perimeter')
se_radius = request.POST.get('se-radius')
se_smoothness = request.POST.get('se-smoothness')
se_symmetry = request.POST.get('se-symmetry')
se_texture = request.POST.get('se-texture')
w_area = request.POST.get('w-area')
w_compactness = request.POST.get('w-compactness')
w_concavePtr = request.POST.get('w-concavePtr')
w_concavity = request.POST.get('w-concavity')
w_fractalDimension = request.POST.get('w-fractalDimension')
w_perimeter = request.POST.get('w-perimeter')
w_radius = request.POST.get('w-radius')
w_smoothness = request.POST.get('w-smoothness')
w_symmetry = request.POST.get('w-symmetry')
w_texture = request.POST.get('w-texture')
# Load and return the breast cancer wisconsin dataset (classification).
cancer = datasets.load_breast_cancer()
# Split dataset into training set and test set
X_train, X_test, y_train, y_test = train_test_split(cancer.data, cancer.target, test_size=0.3,random_state=109) # 70% training and 30% test
# Create a svm Classifier
if kind == "1":
clf = SVC(kernel='linear')
elif kind == "2":
clf = SVC(kernel='poly', degree=2, gamma='auto')
elif kind == "3":
clf = SVC(kernel='poly', degree=2, gamma='scale')
elif kind == "4":
clf = SVC(kernel='rbf', gamma='scale')
# Train the model using the training sets
clf.fit(X_train, y_train)
# Predict the response for test dataset
y_pred = clf.predict(X_test)
efficacite = metrics.accuracy_score(y_test, y_pred)
taux_fauxPositifs = 1-metrics.precision_score(y_test, y_pred)
taux_fauxNegatifs = 1-metrics.recall_score(y_test, y_pred)
# Predict
pred = clf.predict([[m_radius, m_texture, m_perimeter, m_area, m_smoothness, m_compactness, m_concavity, m_concavePtr, m_symmetry, m_fractalDimension,
se_radius, se_texture, se_perimeter, se_area, se_smoothness, se_compactness, se_concavity, se_concavePtr, se_symmetry, se_fractalDimension,
w_radius, w_texture, w_perimeter, w_area, w_smoothness, w_compactness, w_concavity, w_concavePtr, w_symmetry, w_fractalDimension]])
if pred[0] == 1:
res = "Benign"
else :
res = "Malignant"
html = render_to_string("results.html", {'result': res, 'eff': truncate(efficacite*100,4), 'taux_fp': truncate(taux_fauxPositifs*100,4), 'taux_fn': truncate(taux_fauxNegatifs*100,4)})
return HttpResponse(html)
def rf(request):
m_area = request.POST.get('m-area')
m_compactness = request.POST.get('m-compactness')
m_concavePtr = request.POST.get('m-concavePtr')
m_concavity = request.POST.get('m-concavity')
m_fractalDimension = request.POST.get('m-fractalDimension')
m_perimeter = request.POST.get('m-perimeter')
m_radius = request.POST.get('m-radius')
m_smoothness = request.POST.get('m-smoothness')
m_symmetry = request.POST.get('m-symmetry')
m_texture = request.POST.get('m-texture')
se_area = request.POST.get('se-area')
se_compactness = request.POST.get('se-compactness')
se_concavePtr = request.POST.get('se-concavePtr')
se_concavity = request.POST.get('se-concavity')
se_fractalDimension = request.POST.get('se-fractalDimension')
se_perimeter = request.POST.get('se-perimeter')
se_radius = request.POST.get('se-radius')
se_smoothness = request.POST.get('se-smoothness')
se_symmetry = request.POST.get('se-symmetry')
se_texture = request.POST.get('se-texture')
w_area = request.POST.get('w-area')
w_compactness = request.POST.get('w-compactness')
w_concavePtr = request.POST.get('w-concavePtr')
w_concavity = request.POST.get('w-concavity')
w_fractalDimension = request.POST.get('w-fractalDimension')
w_perimeter = request.POST.get('w-perimeter')
w_radius = request.POST.get('w-radius')
w_smoothness = request.POST.get('w-smoothness')
w_symmetry = request.POST.get('w-symmetry')
w_texture = request.POST.get('w-texture')
cancer = datasets.load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(cancer.data, cancer.target, test_size=0.3,random_state=109) # 70% training and 30% test
clf = RandomForestClassifier(n_estimators=100)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
efficacite = metrics.accuracy_score(y_test, y_pred)
taux_fauxPositifs = 1-metrics.precision_score(y_test, y_pred)
taux_fauxNegatifs = 1-metrics.recall_score(y_test, y_pred)
pred = clf.predict([[m_radius, m_texture, m_perimeter, m_area, m_smoothness, m_compactness, m_concavity, m_concavePtr, m_symmetry, m_fractalDimension,
se_radius, se_texture, se_perimeter, se_area, se_smoothness, se_compactness, se_concavity, se_concavePtr, se_symmetry, se_fractalDimension,
w_radius, w_texture, w_perimeter, w_area, w_smoothness, w_compactness, w_concavity, w_concavePtr, w_symmetry, w_fractalDimension]])
if pred[0] == 1:
res = "Benign"
else :
res = "Malignant"
html = render_to_string("results.html", {'result': res, 'eff': truncate(efficacite*100,4), 'taux_fp': truncate(taux_fauxPositifs*100,4), 'taux_fn': truncate(taux_fauxNegatifs*100,4)})
return HttpResponse(html)
def dt(request):
m_area = request.POST.get('m-area')
m_compactness = request.POST.get('m-compactness')
m_concavePtr = request.POST.get('m-concavePtr')
m_concavity = request.POST.get('m-concavity')
m_fractalDimension = request.POST.get('m-fractalDimension')
m_perimeter = request.POST.get('m-perimeter')
m_radius = request.POST.get('m-radius')
m_smoothness = request.POST.get('m-smoothness')
m_symmetry = request.POST.get('m-symmetry')
m_texture = request.POST.get('m-texture')
se_area = request.POST.get('se-area')
se_compactness = request.POST.get('se-compactness')
se_concavePtr = request.POST.get('se-concavePtr')
se_concavity = request.POST.get('se-concavity')
se_fractalDimension = request.POST.get('se-fractalDimension')
se_perimeter = request.POST.get('se-perimeter')
se_radius = request.POST.get('se-radius')
se_smoothness = request.POST.get('se-smoothness')
se_symmetry = request.POST.get('se-symmetry')
se_texture = request.POST.get('se-texture')
w_area = request.POST.get('w-area')
w_compactness = request.POST.get('w-compactness')
w_concavePtr = request.POST.get('w-concavePtr')
w_concavity = request.POST.get('w-concavity')
w_fractalDimension = request.POST.get('w-fractalDimension')
w_perimeter = request.POST.get('w-perimeter')
w_radius = request.POST.get('w-radius')
w_smoothness = request.POST.get('w-smoothness')
w_symmetry = request.POST.get('w-symmetry')
w_texture = request.POST.get('w-texture')
cancer = datasets.load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(cancer.data, cancer.target, test_size=0.3,random_state=109) # 70% training and 30% test
clf = DecisionTreeClassifier()
t = clf.fit(X_train, y_train)
fig, ax = plt.subplots(figsize=(25, 25))
tree.plot_tree(t, feature_names=cancer.feature_names, class_names=cancer.target_names, max_depth=6, fontsize=10, filled=True)
fig.savefig('static/static_local/result_tree/tree.png')
y_pred = clf.predict(X_test)
efficacite = metrics.accuracy_score(y_test, y_pred)
taux_fauxPositifs = 1-metrics.precision_score(y_test, y_pred)
taux_fauxNegatifs = 1-metrics.recall_score(y_test, y_pred)
pred = clf.predict([[m_radius, m_texture, m_perimeter, m_area, m_smoothness, m_compactness, m_concavity, m_concavePtr, m_symmetry, m_fractalDimension,
se_radius, se_texture, se_perimeter, se_area, se_smoothness, se_compactness, se_concavity, se_concavePtr, se_symmetry, se_fractalDimension,
w_radius, w_texture, w_perimeter, w_area, w_smoothness, w_compactness, w_concavity, w_concavePtr, w_symmetry, w_fractalDimension]])
if pred[0] == 1:
res = "Benign"
else:
res = "Malignant"
html = render_to_string("results_dt.html", {'result': res, 'eff': truncate(efficacite*100,4), 'taux_fp': truncate(taux_fauxPositifs*100,4), 'taux_fn': truncate(taux_fauxNegatifs*100,4)})
return HttpResponse(html)
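# knn: same flow as the views above, using KNeighborsClassifier with n_neighbors=15 and uniform weights.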
def knn(request):
m_area = request.POST.get('m-area')
m_compactness = request.POST.get('m-compactness')
m_concavePtr = request.POST.get('m-concavePtr')
m_concavity = request.POST.get('m-concavity')
m_fractalDimension = request.POST.get('m-fractalDimension')
m_perimeter = request.POST.get('m-perimeter')
m_radius = request.POST.get('m-radius')
m_smoothness = request.POST.get('m-smoothness')
m_symmetry = request.POST.get('m-symmetry')
m_texture = request.POST.get('m-texture')
se_area = request.POST.get('se-area')
se_compactness = request.POST.get('se-compactness')
se_concavePtr = request.POST.get('se-concavePtr')
se_concavity = request.POST.get('se-concavity')
se_fractalDimension = request.POST.get('se-fractalDimension')
se_perimeter = request.POST.get('se-perimeter')
se_radius = request.POST.get('se-radius')
se_smoothness = request.POST.get('se-smoothness')
se_symmetry = request.POST.get('se-symmetry')
se_texture = request.POST.get('se-texture')
w_area = request.POST.get('w-area')
w_compactness = request.POST.get('w-compactness')
w_concavePtr = request.POST.get('w-concavePtr')
w_concavity = request.POST.get('w-concavity')
w_fractalDimension = request.POST.get('w-fractalDimension')
w_perimeter = request.POST.get('w-perimeter')
w_radius = request.POST.get('w-radius')
w_smoothness = request.POST.get('w-smoothness')
w_symmetry = request.POST.get('w-symmetry')
w_texture = request.POST.get('w-texture')
cancer = datasets.load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(cancer.data, cancer.target, test_size=0.3,random_state=109) # 70% training and 30% test
knn = neighbors.KNeighborsClassifier(n_neighbors=15, weights='uniform')
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
efficacite = metrics.accuracy_score(y_test, y_pred)
taux_fauxPositifs = 1-metrics.precision_score(y_test, y_pred)
taux_fauxNegatifs = 1-metrics.recall_score(y_test, y_pred)
pred = knn.predict([[m_radius, m_texture, m_perimeter, m_area, m_smoothness, m_compactness, m_concavity, m_concavePtr, m_symmetry, m_fractalDimension,
se_radius, se_texture, se_perimeter, se_area, se_smoothness, se_compactness, se_concavity, se_concavePtr, se_symmetry, se_fractalDimension,
w_radius, w_texture, w_perimeter, w_area, w_smoothness, w_compactness, w_concavity, w_concavePtr, w_symmetry, w_fractalDimension]])
if pred[0] == 1:
res = "Benign"
else:
res = "Malignant"
html = render_to_string("results.html", {'result': res, 'eff': truncate(efficacite*100,4), 'taux_fp': truncate(taux_fauxPositifs*100,4), 'taux_fn': truncate(taux_fauxNegatifs*100,4)})
return HttpResponse(html)
|
import os
import torch
import torch.utils.ffi
from torch.utils.ffi import create_extension
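# Note: torch.utils.ffi was deprecated and later removed from PyTorch (1.0+); torch.utils.cpp_extension
# is its modern replacement. This build script targets the old FFI API.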
strBasepath = os.path.split(os.path.abspath(__file__))[0] + '/'
print("strBasepath")
print(strBasepath)
strHeaders = ['src/my_lib.h']
strSources = ['src/my_lib.c']
strDefines = []
strObjects = []
if torch.cuda.is_available():
strHeaders += ['src/my_lib_cuda.h']#['src/my_lib.h','src/my_lib_cuda.h']
strSources += ['src/my_lib_cuda.c']# ['src/my_lib.c','src/my_lib_cuda.c']
strDefines += [('WITH_CUDA', None)]
strObjects += ['src/my_lib_kernel.o']
# end
objectExtension = torch.utils.ffi.create_extension(
name='_ext.my_lib',
headers=strHeaders,
sources=strSources,
verbose=True,
with_cuda=any(strDefine[0] == 'WITH_CUDA' for strDefine in strDefines),
package=False,
relative_to=strBasepath,
include_dirs=[os.path.expandvars('$CUDA_HOME') + '/include'],
define_macros=strDefines,
extra_objects=[os.path.join(strBasepath, strObject) for strObject in strObjects]
)
import shutil
if __name__ == '__main__':
objectExtension.build()
|
import numpy as np
import cv2
import os
#SEBASTIAN DE LA CRUZ GUTIERREZ PUJ
class colorImage: # Define the class
def __init__(self,path_file): # Constructor
self.image = cv2.imread(path_file,1) # Load the image via OpenCV
def displayProperties(self): # Method to print the image width and height to the screen
print(f'The image width is {self.image.shape[1]}. The image height is {self.image.shape[0]}') # .shape gives the height and width of the image
def makeGray(self): # Method to return the grayscale version of the image
return cv2.cvtColor(self.image,cv2.COLOR_BGR2GRAY) # cv2.COLOR_BGR2GRAY yields the grayscale version of the image
def colorizeRGB(self,canal_color): # Method to show the image keeping only the blue, green, or red channel
if canal_color =='BLUE': # The caller chooses BLUE, GREEN, or RED to decide how to colorize.
B=self.image.copy() # Copy the original image
B[:,:,1] = 0 # Zero out the G and R components in the copy (image is BGR)
B[:,:,2] = 0
cv2.imshow('blue image', B) # Display the copy
cv2.waitKey(0)
if canal_color == 'RED':
R = self.image.copy() # Copy the original image
R[:, :, 0] = 0 # Zero out the B and G components in the copy
R[:, :, 1] = 0
cv2.imshow('red image', R) # Display the copy
cv2.waitKey(0)
if canal_color == 'GREEN':
G = self.image.copy() # Copy the original image
G[:, :, 0] = 0 # Zero out the B and R components in the copy
G[:, :, 2] = 0
cv2.imshow('green image', G) # Display the copy
cv2.waitKey(0)
def makeHue(self): # Method to show an image that emphasizes the hues
H_image= cv2.cvtColor(self.image,cv2.COLOR_BGR2HSV) # Convert to HSV colour space with cv2.COLOR_BGR2HSV
H_image[:,:,1] = 255 # Set the S and V components to the constant 255, leaving H untouched
H_image[:,:,2] = 255
H_image = cv2.cvtColor(H_image,cv2.COLOR_HSV2BGR) # Convert back to BGR using cv2.COLOR_HSV2BGR
cv2.imshow('hue image',H_image)
cv2.waitKey(0)
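# Minimal usage sketch (the image path 'photo.jpg' is an illustrative assumption):
# img = colorImage('photo.jpg')
# img.displayProperties()
# gray = img.makeGray()
# img.colorizeRGB('BLUE')
# img.makeHue()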
|
import pygame as pg
import sys
from game_of_life import GameOfLife
class InputBox:
def __init__(self, x, y, w, h, text=''):
self.rect = pg.Rect(x, y, w, h)
self.color = (255, 255, 255)
self.text = text
self.font = pg.font.Font("assets/ARCADECLASSIC.ttf", 50)
self.txt_surface = self.font.render(text, True, self.color)
self.active = False
def handle_event(self, event):
if event.type == pg.MOUSEBUTTONDOWN:
# If the user clicked on the input_box rect.
if self.rect.collidepoint(event.pos):
# Toggle the active variable.
self.active = not self.active
else:
self.active = False
if event.type == pg.KEYDOWN:
if self.active:
if event.key == pg.K_RETURN:
print(self.text)
self.text = ''
elif event.key == pg.K_BACKSPACE:
self.text = self.text[:-1]
else:
self.text += event.unicode
# Re-render the text.
self.txt_surface = self.font.render(self.text, True, self.color)
def update(self):
# Resize the box if the text is too long.
width = max(200, self.txt_surface.get_width() + 10)
self.rect.w = width
def draw(self, screen):
# Blit the text.
screen.blit(self.txt_surface, (self.rect.x + 5, self.rect.y + 5))
# Blit the rect.
pg.draw.rect(screen, self.color, self.rect, 2)
class Menu(object):
def __init__(self, height, width, screen_bg):
pg.font.init()
self.screen = pg.display.set_mode((height, width))
self.width = width
self.height = height
self.bg = screen_bg
self.font = pg.font.Font("assets/ARCADECLASSIC.ttf", 50)
self.click = False
self.random_init = False
self.n_cells = 50
self.buttons = [
("Play", self.font.render("Play", True, (255, 255, 255)), pg.Rect(100, 100, 205, 80), self.bg),
("Options", self.font.render("Options", True, (255, 255, 255)), pg.Rect(100, 200, 205, 80), self.bg),
]
self.option_buttons = [
("Random Start", self.font.render("Random Start", True, (255, 255, 255)), pg.Rect(100, 100, 205, 80), self.bg),
("Number of Cells", self.font.render("Number of Cells", True, (255, 255, 255)), pg.Rect(100, 200, 205, 80), self.bg),
]
def draw_text(self, text, font, color, screen, x, y):
text_obj = font.render(text, True, color)
text_rect = text_obj.get_rect()
text_rect.topleft = (x, y)
screen.blit(text_obj, text_rect)
def draw_buttons(self, buttons):
for text, font, rect, color in buttons:
pg.draw.rect(self.screen, color, rect)
self.screen.blit(font, rect)
def run_menu(self):
pg.init()
while True:
self.refresh_screen()
self.draw_text('Main Menu', self.font, (255, 255, 255), self.screen, 20, 20)
for event in pg.event.get():
if event.type == pg.QUIT:
pg.quit()
sys.exit()
elif event.type == pg.KEYDOWN:
pass
elif event.type == pg.MOUSEBUTTONDOWN:
for button in self.buttons:
if button[2].collidepoint(event.pos):
if button[0] == 'Play':
gof = GameOfLife(self.width, self.height, self.n_cells, self.random_init)
gof.run()
elif button[0] == "Options":
self.options_menu()
self.draw_buttons(self.buttons)
pg.display.update()
def refresh_screen(self):
self.screen.fill(self.bg)
def options_menu(self):
running = True
box = InputBox(550, 200, 150, 50, str(self.n_cells))
while running:
words = [
('Options', (255, 255, 255), 20, 20),
('False' if not self.random_init else 'True', (255, 255, 255), 550, 100)
]
self.refresh_screen()
for text, color, x, y in words:
self.draw_text(text, self.font, color, self.screen, x, y)
for event in pg.event.get():
if event.type == pg.QUIT:
pg.quit()
sys.exit()
elif event.type == pg.KEYDOWN:
if event.key == pg.K_ESCAPE:
running = False
elif event.type == pg.MOUSEBUTTONDOWN:
for button in self.option_buttons:
if button[2].collidepoint(event.pos):
if button[0] == 'Random Start':
self.random_init = not self.random_init
box.handle_event(event)
try:
cells = int(box.text)
except ValueError:
# Keep the previously configured value while the input box is empty or not a number.
cells = self.n_cells
finally:
self.n_cells = cells
box.update()
box.draw(self.screen)
self.draw_buttons(self.option_buttons)
pg.display.flip()
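# Minimal usage sketch (window size and background colour are illustrative assumptions):
# if __name__ == '__main__':
#     Menu(800, 800, (10, 10, 10)).run_menu()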
|
import mysql.connector
import datetime
import common, secure
def create_connection(dbname):
db_host = secure.host() or "127.0.0.1"
cnx = mysql.connector.connect(user=secure.username(),
password=secure.password(),
host=db_host)
cnx.set_charset_collation("utf8mb4", "utf8mb4_general_ci")
cursor = cnx.cursor()
cursor.execute('CREATE DATABASE IF NOT EXISTS {}'.format(dbname))
cnx.database = dbname
return cnx
def insert_tweets(list_of_tweets, cnx):
tweets_total = len(list_of_tweets)
values_tweets = values_events = values_hashtags = values_duplicates = ""
cursor = cnx.cursor()
tweets_in_db = get_existing_tweets(cursor)
duplicate_dict = dict()
for i in range(tweets_total):
tweet_id = str(list_of_tweets[i]["id"])
if tweet_id not in tweets_in_db:
if len(values_tweets) > 0:
values_tweets += ","
values_events += ","
value_to_append = "('{}','{}','{}','{}',{},{},{},'{}',{})"
values_tweets += "".join(value_to_append.format(tweet_id,
"1", # This is hardcoded and will need to change
list_of_tweets[i]["text"].replace("'", "''"), # Escape character for apostrophes
list_of_tweets[i]["user"]["id"],
list_of_tweets[i]["latitude"],
list_of_tweets[i]["longitude"],
list_of_tweets[i]["in_reply_to_status_id"],
list_of_tweets[i]["client_name"],
list_of_tweets[i]["rt_id"]))
value_to_append = "('{}','{}','{}','{}','{}')"
values_events += "".join(value_to_append.format("1", #Replace hardcoding here too
list_of_tweets[i]["sql_date"],
list_of_tweets[i]["sql_time"],
"twitter",
tweet_id))
value_to_append = "('{}','{}','{}')"
for hashtag in list_of_tweets[i]["entities"]["hashtags"]:
if len(values_hashtags) > 0:
values_hashtags += ","
values_hashtags += "".join(value_to_append.format(tweet_id,
hashtag["indices"][0],
hashtag["text"]))
else:
# Tweet is in db, so add to duplicate list for checking
duplicate_dict[str(list_of_tweets[i]["id"])] = list_of_tweets[i]
values_duplicates = tweet_id if len(values_duplicates) == 0 else values_duplicates + ", " + tweet_id
if len(values_tweets) > 0:
sql_insert_tweets = ("INSERT INTO tweetdetails"
"(tweetid, userid, tweettext, twitteruserid, latitude, longitude, replyid, client, retweetid)"
"VALUES {}".format(values_tweets))
cursor.execute(sql_insert_tweets)
if len(values_events) > 0:
sql_insert_events = ("INSERT INTO events"
"(userid, eventdate, eventtime, eventtype, detailid)"
"VALUES {}".format(values_events))
cursor.execute(sql_insert_events)
if len(values_hashtags) > 0:
sql_insert_hashtags = ("INSERT INTO tweethashtags"
"(tweetid, ixstart, hashtag)"
"VALUES {}".format(values_hashtags))
cursor.execute(sql_insert_hashtags)
if len(values_duplicates) > 0:
sql_get_duplicate_data = ("SELECT events.detailid, eventdate, eventtime, client FROM events "
"LEFT JOIN tweetdetails ON events.detailid = tweetdetails.tweetid "
"WHERE events.detailid IN ({}) AND events.eventtype='twitter'".format(values_duplicates))
cursor.execute(sql_get_duplicate_data)
conflicting_duplicates_dict = dict()
for i in cursor:
conflicting_duplicates_dict[str(i[0])] = dict()
if duplicate_dict[str(i[0])]["client_name"] != i[3]:
conflicting_duplicates_dict[str(i[0])].update({"client_name":
duplicate_dict[str(i[0])]["client_name"]})
if duplicate_dict[str(i[0])]["sql_date"] != str(i[1]):
conflicting_duplicates_dict[str(i[0])].update({"eventdate":
duplicate_dict[str(i[0])]["sql_date"]})
if duplicate_dict[str(i[0])]["sql_time"] != str((datetime.datetime(2000, 1, 1) + i[2]).time()):
conflicting_duplicates_dict[str(i[0])].update({"eventtime":
duplicate_dict[str(i[0])]["sql_time"]})
# Optimize this with Pandas, potentially
sql_get_previous_duplicates = "SELECT * from tweetconflicts"
cursor.execute(sql_get_previous_duplicates)
unique_conflicts = str()
# Prune all the duplicate duplicates out first
for i in cursor:
current_duplicate = conflicting_duplicates_dict.get(str(i[0])) or dict()
if current_duplicate.get(i[1]) == i[2]:
conflicting_duplicates_dict[str(i[0])].pop(i[1])
# Now that we know all duplicates left are unique, add them to the string
for duplicate_tweet in conflicting_duplicates_dict:
for duplicate_item in conflicting_duplicates_dict[duplicate_tweet]:
add_to_string = "('{}','{}','{}')".format(duplicate_tweet, duplicate_item,
conflicting_duplicates_dict[duplicate_tweet][duplicate_item])
unique_conflicts = add_to_string if len(unique_conflicts) == 0 else unique_conflicts +\
", " + add_to_string
if len(unique_conflicts) > 0:
sql_insert_conflicts = "INSERT INTO tweetconflicts VALUES {}".format(unique_conflicts)
cursor.execute(sql_insert_conflicts)
cnx.commit()
cursor.close()
def insert_fitbit_sleep(sleep, user_prefs):
print(f"[{datetime.datetime.now()}] Beginning to store sleep events, total of {len(sleep)} records to process.")
proc_start_time = datetime.datetime.now()
cnx = create_connection('social')
cursor = cnx.cursor()
user_id = 1 # Static for now
values_list = str()
# Establish list of existing sleep events to eliminate duplicates
sql_get_all_logids = f"SELECT logid FROM fitbit_sleep WHERE userid = {user_id}"
cursor.execute(sql_get_all_logids)
sql_results = list()
sleep_levels = dict()
for i in cursor:
sql_results.append(i[0])
# Loop over sleep event list
for session in sleep:
log_id = session["logId"]
# Add any previously unrecorded sleep to the list of values
if log_id not in sql_results:
# Extra comma separator in between multiple values
if len(values_list) > 0:
values_list += ", "
start_date_time = session["startTime"]
end_date_time = session["endTime"]
timezone = user_prefs.timezone
duration = session["duration"]
main_sleep = 1 if session["mainSleep"] else 0
values_list += (f"('{user_id}', '{log_id}', '{start_date_time}', '{end_date_time}',"
f" '{timezone}', '{duration}', '{main_sleep}')")
sleep_levels[log_id] = session["levels"]
if len(values_list) > 0:
sql_add_to_db = ("INSERT INTO fitbit_sleep (userid, logid, startdatetime,"
" enddatetime, timezone, duration, mainsleep) "
f"VALUES {values_list}")
cursor.execute(sql_add_to_db)
# Need to find all events just added which are not yet in the events table, so they can be added.
sql_get_newly_added_sleep = ("SELECT userid, startdatetime, sleepid, logid "
"FROM fitbit_sleep WHERE sleepid NOT IN "
"(SELECT detailid FROM events WHERE eventtype = 'fitbit-sleep')")
cursor.execute(sql_get_newly_added_sleep)
new_sleep_events = list()
event_values = str()
logid_index = dict()
for i in cursor:
logid_index[i[3]] = i[2]
new_sleep_events.append(i)
# Populate secondary tables
for log_id in sleep_levels:
list_of_stages_values = list()
for level in sleep_levels[log_id]["summary"]:
count = sleep_levels[log_id]["summary"][level].get("count")
minutes = sleep_levels[log_id]["summary"][level].get("minutes")
avg_minutes = sleep_levels[log_id]["summary"][level].get("thirtyDayAvgMinutes") or 0
# The "values" section of the sql statement will go on a list to be grouped in an optimal size
stages_values = f"('{logid_index[log_id]}', '{level}', '{count}', '{minutes}', '{avg_minutes}')"
list_of_stages_values.append(stages_values)
list_of_stages_values = group_insert_into_db(list_of_stages_values, 10)
for stages_values in list_of_stages_values:
sql_add_stages = ("INSERT INTO fitbit_sleep_stages (sleepid, sleepstage, stagecount,"
" stageminutes, avgstageminutes)"
f" VALUES {stages_values}")
cursor.execute(sql_add_stages)
list_of_data_values = list()
for item in sleep_levels[log_id]["data"]:
sleep_date_time = item["dateTime"]
level = item["level"]
seconds = item["seconds"]
# group data "values" to be optimal
data_values = f"('{logid_index[log_id]}', '{sleep_date_time}', '{level}', '{seconds}')"
list_of_data_values.append(data_values)
list_of_data_values = group_insert_into_db(list_of_data_values, 100)
for data_values in list_of_data_values:
sql_add_data = ("INSERT INTO fitbit_sleep_data (sleepid, sleepdatetime, sleepstage, seconds)"
f"VALUES {data_values}")
cursor.execute(sql_add_data)
# Every event from the fitbit sleep table needs to have its time adjusted to UTC before
# going in the events table. Fitbit sleep is tracked in local time but all events in events table
# must be consistently UTC.
for event in new_sleep_events:
if len(event_values) > 0:
event_values += ", "
user_id = event[0]
event_date = common.local_to_utc(event[1], timezone=timezone).strftime("%Y-%m-%d")
event_time = common.local_to_utc(event[1], timezone=timezone).strftime("%H:%M:%S")
event_id = event[2]
event_values += f"('{user_id}', '{event_date}', '{event_time}', 'fitbit-sleep', '{event_id}')"
sql_add_new_to_events_utc = ("INSERT INTO events (userid, eventdate, eventtime, eventtype, detailid) "
f"VALUES {event_values};")
cursor.execute(sql_add_new_to_events_utc)
cnx.commit()
cursor.close()
print(f"[{datetime.datetime.now()}] Finished processing all events, time elapsed was {datetime.datetime.now() - proc_start_time}")
def get_fitbit_sleep_event(sleep_id):
sql_fitbit_sleep = ("SELECT eventdate, eventtime, sleepid, logid, startdatetime, enddatetime, timezone,"
" duration, mainsleep FROM events e LEFT JOIN fitbit_sleep f"
" ON e.detailid = f.sleepid"
f" WHERE f.sleepid = {sleep_id} AND e.eventtype = 'fitbit-sleep'")
output = get_results_for_query(sql_fitbit_sleep)
return output
def update_fitbit_sleep_timezone(sleep_id, event_date, event_time, timezone):
cnx = create_connection("social")
cursor = cnx.cursor()
sql_event_update = (f"UPDATE events SET eventdate = '{event_date}', eventtime = '{event_time}'"
f" WHERE detailid = {sleep_id} and eventtype = 'fitbit-sleep'")
cursor.execute(sql_event_update)
sql_fitbit_sleep_update = (f"UPDATE fitbit_sleep SET timezone = '{timezone}'"
f" WHERE sleepid = {sleep_id}")
cursor.execute(sql_fitbit_sleep_update)
cnx.commit()
close_connection(cnx)
def insert_foursquare_checkins(checkins, user_prefs):
cnx = create_connection('social')
cursor = cnx.cursor()
user_id = user_prefs.user_id
checkin_values = list()
event_values = list()
# Query db to see what all is in the db already
already_in_db = list()
sql_get_already_in_db = (f"SELECT checkinid FROM events e LEFT JOIN foursquare_checkins f"
f" ON e.detailid = f.eventid"
f" WHERE e.userid = {user_id} AND e.eventtype = 'foursquare'")
cursor.execute(sql_get_already_in_db)
for i in cursor:
if i:
already_in_db.append(str(i[0]))
# Pop entries from the dict which are already in the db
for entry in already_in_db:
checkins.pop(entry)
# Each values entry will contain, in this order:
# checkinid, eventtype, tzoffset, venueid, venuename, checkintime, shout,
# veventid, veventname, primarycatid, primarycatname
for key in checkins:
checkin = checkins[key]
checkin_values.append(f"('{checkin['id']}', '{checkin['type']}', '{checkin['timeZoneOffset']}',"
f" '{checkin['venue']['id']}', '{checkin.get_venue_name_for_sql()}',"
f" '{checkin['createdAt']}', {checkin.get_shout_for_sql()},"
f" {checkin.get_event_id_for_sql()}, {checkin.get_event_name_for_sql()},"
f" {checkin.get_primary_category_id_and_name()['id']},"
f" {checkin.get_primary_category_id_and_name()['name']})")
# Insert into db in 100 entry batches
grouped_values = group_insert_into_db(checkin_values, 100)
for events_to_insert in grouped_values:
sql_insert_checkin_data = (f"INSERT INTO foursquare_checkins (checkinid, eventtype, tzoffset, venueid,"
f" venuename, checkintime, shout, veventid, veventname, primarycatid, primarycatname)"
f" VALUES {events_to_insert};")
cursor.execute(sql_insert_checkin_data)
sql_get_ids_for_events = (f"SELECT f.checkinid, f.eventid from foursquare_checkins f WHERE f.eventid NOT IN"
f" (SELECT e.detailid FROM events e WHERE e.eventtype = 'foursquare')")
cursor.execute(sql_get_ids_for_events)
event_id_dict = dict()
# Result will be a dict with keys of checkinid and values of eventid, to use for events table detailid
for i in cursor:
event_id_dict[i[0]] = i[1]
event_values = list()
for key in checkins:
checkin = checkins[key]
event_id = event_id_dict[checkin["id"]]
event_values.append(f"('{user_id}', '{checkin.get_date_str()}', '{checkin.get_time_str()}', 'foursquare',"
f" '{event_id}')")
grouped_event_values = group_insert_into_db(event_values, 100)
for events_to_insert in grouped_event_values:
sql_insert_event_data = ("INSERT INTO events (userid, eventdate, eventtime, eventtype, detailid) "
f"VALUES {events_to_insert};")
cursor.execute(sql_insert_event_data)
cnx.commit()
def get_existing_tweets(cursor):
sql_get_all_tweet_ids = "SELECT tweetid FROM tweetdetails;"
cursor.execute(sql_get_all_tweet_ids)
output = list()
for i in cursor:
output.append(str(i[0]))
return output
def get_datetime_range(start_datetime, end_datetime, list_of_data_types):
subquery_list = list()
# TODO: This is not going to scale, so come up with a better way to handle this
# 0 : eventdate
# 1 : eventtime
# 2 : detailid / logid
# 3 : tweettext / shout
# 4 : client
# 5 : latitude
# 6 : longitude
# 7 : eventtype
# 8 : enddatetime
# 9 : sleepid
# 10: sum(stageminutes)
# 11: startdatetime
# 12: replyid
# 13: venuename
# 14: venueid
# 15: veventid
# 16: veventname
# 17: address
# 18: city
# 19: state
# 20: country
# 21: checkinid
# 22: sleep_time
# 23: timezone
twitter_sql_query = ("SELECT eventdate date, eventtime time, detailid source_id, tweettext body, client, "
"latitude, longitude, eventtype object_type, NULL end_time, NULL sleep_id, NULL rest_mins,"
" NULL start_time, replyid reply_id, NULL venue_name, NULL venue_id, NULL venue_event_id,"
" NULL venue_event_name, NULL address, NULL city, NULL state, NULL country, NULL checkin_id,"
" NULL sleep_time, NULL timezone "
"FROM tweetdetails "
"LEFT JOIN events "
"ON detailid = tweetid "
"WHERE eventtype = 'twitter' "
f"AND CONCAT(eventdate,' ',eventtime) >= '{start_datetime}' "
f"AND CONCAT(eventdate,' ',eventtime) <= '{end_datetime}' ")
fitbit_sql_query = ("SELECT eventdate date, eventtime time, f.logid source_id, NULL body, NULL client, "
"NULL latitude, NULL longitude, eventtype object_type, enddatetime end_time, f.sleepid sleep_id, "
"sum(stageminutes) rest_mins, startdatetime start_time, NULL reply_id, NULL venue_name, "
"NULL venue_id, NULL venue_event_id, NULL venue_event_name, NULL address, NULL city, "
"NULL state, NULL country, NULL checkin_id, f.duration sleep_time, f.timezone timezone "
"FROM fitbit_sleep f "
"LEFT JOIN events "
"ON detailid = f.sleepid "
"LEFT JOIN fitbit_sleep_stages s "
"ON f.sleepid = s.sleepid "
"WHERE eventtype = 'fitbit-sleep' "
f"AND CONCAT(eventdate,' ',eventtime) >= '{start_datetime}' "
f"AND CONCAT(eventdate,' ',eventtime) <= '{end_datetime}' "
f"AND sleepstage NOT LIKE '%wake' AND sleepstage NOT LIKE 'restless' "
f"GROUP BY s.sleepid, eventdate, eventtime, f.logid, f.duration, f.timezone, f.sleepid ")
foursquare_sql_query = ("SELECT e.eventdate date, e.eventtime time, NULL source_id, o.shout body, NULL client, "
"v.latitude, v.longitude, e.eventtype object_type, NULL end_time, NULL sleep_id, "
"NULL rest_mins, NULL start_time, NULL reply_id, o.venuename venue_name, "
"o.venueid venue_id, o.veventid venue_event_id, o.veventname venue_event_name, "
"v.address address, v.city city, v.state state, v.country country, o.checkinid checkin_id, "
"NULL sleep_time, NULL timezone "
"FROM foursquare_checkins o "
"LEFT JOIN events e "
"ON e.detailid = o.eventid "
"LEFT JOIN foursquare_venues v "
"ON o.venueid = v.venueid "
"WHERE e.eventtype = 'foursquare' "
f"AND CONCAT(eventdate,' ',eventtime) >= '{start_datetime}' "
f"AND CONCAT(eventdate,' ',eventtime) <= '{end_datetime}' ")
if 'twitter' in list_of_data_types:
subquery_list.append(twitter_sql_query)
if 'fitbit-sleep' in list_of_data_types:
subquery_list.append(fitbit_sql_query)
if "foursquare" in list_of_data_types:
subquery_list.append(foursquare_sql_query)
sql_query = " UNION ".join(subquery_list) + "ORDER BY date ASC, time ASC;"
query_start_time = datetime.datetime.now()
output = get_results_for_query(sql_query)
print(f'Returned query in {datetime.datetime.now() - query_start_time}')
return output
def group_insert_into_db(list_of_rows, group_size):
"""It is far more optimal to insert multiple values with a single SQL statement. However, it is possible to create
statements which are too large and cause an error. The workaround for this is to divide the values into chunks which
are large enough to be optimal while being small enough to avoid an error.
This function takes both the list of values and a size parameter, to adjust the size of each group."""
list_pos = 0
sub_list = list()
combined_list = list()
# Iterate through the whole list of items
while list_pos < len(list_of_rows):
# Start building a sub-list to later add
if len(sub_list) < group_size:
sub_list.append(list_of_rows[list_pos])
# Once the sub-list is "full" it is dumped onto the return list
else:
combined_list.append(", ".join(sub_list))
sub_list = list()
sub_list.append(list_of_rows[list_pos])
list_pos += 1
# Whatever was on the sub-list when the loop ended gets added
if len(sub_list) > 0:
combined_list.append(", ".join(sub_list))
return combined_list
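# Illustrative example of the grouping behaviour:
# group_insert_into_db(["(1,'a')", "(2,'b')", "(3,'c')"], 2)
# -> ["(1,'a'), (2,'b')", "(3,'c')"]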
def get_count_for_range(start_datetime, end_datetime):
sql_query = ("SELECT COUNT(*) count "
"FROM tweetdetails "
"LEFT JOIN events "
"ON detailid = tweetid "
"WHERE CONCAT(eventdate,' ',eventtime) >= '{}' "
"AND CONCAT(eventdate,' ',eventtime) <= '{}';".format(start_datetime, end_datetime))
output = get_results_for_query(sql_query)
return output
def generate_search_where_clause(search_list, searchable_columns):
# Used by get_search_term. This generates the rather complicated where clause to
# search for ALL search terms across ALL searchable columns. searchable_columns is
# passed in as a list of all columns to coalesce into one searchable string
where_clause = "WHERE "
where_list = []
coalesced_columns_list = []
# The coalesce is a SQL way of converting NULL fields into blanks if they don't exist
for column in searchable_columns:
coalesced_columns_list.append(f"coalesce({column}, '')")
# Coalesced columns can all be concatenated, even if one column is NULL. If these
# weren't coalesced then one NULL would be make the whole thing NULL
coalesced_columns = "concat(" + ", ' ',".join(coalesced_columns_list) + ")"
# Each keyword needs to be evaluated separately and joined by ANDs because all keywords
# must be contained somewhere in the searchable event data.
for string in search_list:
where_list.append(f"({coalesced_columns} like '%{string}%')")
where_clause += " AND ".join(where_list)
return where_clause
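# Illustrative example: generate_search_where_clause(['cat', 'dog'], ['tweettext']) produces
# "WHERE (concat(coalesce(tweettext, '')) like '%cat%') AND (concat(coalesce(tweettext, '')) like '%dog%')"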
def get_search_term(search_term, event_types):
search_term = search_term.replace("'", "''")
search_term = search_term.replace("\\", "\\\\")
search_term = search_term.replace("%", "\\%")
search_term = search_term.replace("_", "\\_")
def keyword_parse(keyword):
if keyword in search_term:
try:
keyword_index = search_term.index(keyword)
open_quote_index = search_term.index("\"", keyword_index)
closed_quote_index = search_term.index("\"", open_quote_index + 1)
keyword_search = search_term[open_quote_index + 1:closed_quote_index]
modified_search = search_term[:keyword_index] + search_term[closed_quote_index + 1:]
except ValueError as error:
print(f"Error while parsing keyword: {error}")
keyword_search = str()
modified_search = search_term
else:
keyword_search = str()
modified_search = search_term
return keyword_search, modified_search
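# Illustrative example: with search_term = 'trains client:"Twitterrific" snow',
# keyword_parse('client:"') returns ('Twitterrific', 'trains  snow').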
client_search, search_term = keyword_parse("client:\"")
client_sql = f"AND client like '%{client_search}%' " if client_search else str()
geo_search, search_term = keyword_parse("geo:")
geo_sql = f"AND latitude IS NOT NULL AND longitude IS NOT NULL " if geo_search == "true" else \
f"AND latitude IS NULL AND longitude IS NULL " if geo_search == "false" else str()
search_list = search_term.split(" ")
# The only searchable field in Twitter events is the text
twitter_searchable = ["tweettext"]
twitter_where_clause = generate_search_where_clause(search_list, twitter_searchable)
twitter_query = ("SELECT eventdate date, eventtime time, detailid source_id, tweettext body, client, "
"latitude, longitude, eventtype object_type, NULL end_time, NULL sleep_id, NULL rest_mins,"
" NULL start_time, replyid reply_id, NULL venue_name, NULL venue_id, NULL venue_event_id,"
" NULL venue_event_name, NULL address, NULL city, NULL state, NULL country, NULL checkin_id,"
" NULL sleep_time, NULL timezone "
"FROM tweetdetails "
"LEFT JOIN events "
"ON detailid = tweetid "
f"{twitter_where_clause} AND eventtype = 'twitter'"
f"{client_sql}"
f"{geo_sql}")
# Quite a lot searchable for foursquare, including the "shout," the location, the venue, event name. A lot
# of these would be NULL
fsq_searchable = ["o.shout", "o.venuename", "o.veventname", "v.city", "v.state", "v.country"]
fsq_where_clause = generate_search_where_clause(search_list, fsq_searchable)
foursquare_query = ("SELECT e.eventdate date, e.eventtime time, NULL source_id, o.shout body, NULL client, "
"v.latitude, v.longitude, e.eventtype object_type, NULL end_time, NULL sleep_id, "
"NULL rest_mins, NULL start_time, NULL reply_id, o.venuename venue_name, "
"o.venueid venue_id, o.veventid venue_event_id, o.veventname venue_event_name, "
"v.address address, v.city city, v.state state, v.country country, o.checkinid checkin_id, "
"NULL sleep_time, NULL timezone "
"FROM foursquare_checkins o "
"LEFT JOIN events e "
"ON e.detailid = o.eventid "
"LEFT JOIN foursquare_venues v "
"ON o.venueid = v.venueid "
f"{fsq_where_clause} AND e.eventtype = 'foursquare'")
subqueries_list = []
if "twitter" in event_types:
subqueries_list.append(twitter_query)
if "foursquare" in event_types and search_list != ['']:
subqueries_list.append(foursquare_query)
sql_query = " UNION ".join(subqueries_list) + "ORDER BY date ASC, time ASC;"
output = get_results_for_query(sql_query)
return output
def get_years_with_data():
cnx = create_connection("social")
cursor = cnx.cursor()
sql_query = "SELECT left(eventdate,4) FROM events GROUP BY left(eventdate,4) ORDER BY left(eventdate,4);"
cursor.execute(sql_query)
years = list()
for i in cursor:
years.append(i[0])
close_connection(cnx)
return years
def get_user_preferences(user_id):
sql_query = f"SELECT preference_key, preference_value FROM user_preference WHERE userid = {user_id};"
results = get_results_for_query(sql_query)
preferences = dict()
for result in results:
preferences[result["preference_key"]] = result["preference_value"]
return preferences
def set_user_preferences(user_id, **kwargs):
cnx = create_connection("social")
cursor = cnx.cursor()
sql_query = (f"INSERT INTO user_preference VALUES ('{user_id}', 'timezone', '{kwargs.get('timezone')}')," +
f" ('{user_id}', 'reverse_order', '{kwargs.get('reverse_order')}')"
" ON DUPLICATE KEY UPDATE preference_value=CASE" +
f" WHEN preference_key = 'timezone' THEN '{kwargs.get('timezone')}'" +
f" WHEN preference_key = 'reverse_order' THEN '{kwargs.get('reverse_order')}'"
f" ELSE NULL END;")
cursor.execute(sql_query)
cnx.commit()
close_connection(cnx)
def set_user_source_preferences(user_id, **kwargs):
cnx = create_connection("social")
cursor = cnx.cursor()
sql_query = (f"INSERT INTO user_preference VALUES ('{user_id}', 'show_twitter', '{kwargs.get('show_twitter')}'),"
f" ('{user_id}', 'show_foursquare', '{kwargs.get('show_foursquare')}'), ('{user_id}', "
f"'show_fitbit-sleep', '{kwargs.get('show_fitbit-sleep')}')"
" ON DUPLICATE KEY UPDATE preference_value=CASE"
f" WHEN preference_key = 'show_twitter' THEN '{kwargs.get('show_twitter')}'"
f" WHEN preference_key = 'show_foursquare' THEN '{kwargs.get('show_foursquare')}'"
f" WHEN preference_key = 'show_fitbit-sleep' THEN '{kwargs.get('show_fitbit-sleep')}'"
f" ELSE NULL END;")
cursor.execute(sql_query)
cnx.commit()
close_connection(cnx)
def insert_in_reply_to(tweet_id, create_date, user_name, in_reply_to_status, in_reply_to_user, status_text, user_id, lang):
cnx = create_connection("social")
cursor = cnx.cursor()
# Both these can be empty, so we need to swap out for NULL
in_reply_to_user = in_reply_to_user or "NULL"
in_reply_to_status = in_reply_to_status or "NULL"
# Drop timezone tag
create_date = create_date.strip("Z")
# format single quotes in status
status_text = status_text.replace("'", "''")
sql_query = ("INSERT INTO tweet_in_reply VALUES" +
f"({tweet_id}, '{create_date}', '{user_name}', {user_id}," +
f" {in_reply_to_status}, {in_reply_to_user}, '{status_text}', '{lang}');")
cursor.execute(sql_query)
cnx.commit()
close_connection(cnx)
def get_in_reply_to(tweet_id):
sql_query = f"SELECT tweetid, createdate, username, statustext FROM tweet_in_reply where tweetid = {tweet_id};"
reply = get_results_for_query(sql_query)
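# for/else: return the first row reshaped as a tweet-like dict; the else branch runs only
# when the query returned no rows, in which case None is returned.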
for result in reply:
output = {"id_str": str(result["tweetid"]),
"created_date": result["createdate"],
"user": {"screen_name": result["username"]},
"text": result["statustext"]}
return output
else:
return None
def insert_foursquare_venue(venue_id, **kwargs):
cnx = create_connection("social")
cursor = cnx.cursor()
name = kwargs.get("name").replace("'","''") if kwargs.get("name") else None
name = f"'{name}'" if name else "NULL"
url = f"'{kwargs.get('url')}'" if kwargs.get("url") else "NULL"
address = kwargs.get("address").replace("'", "''") if kwargs.get("address") else None
address = f"'{address}'" if address else "NULL"
postal_code = f"'{kwargs.get('postal_code')}'" if kwargs.get('postal_code') else "NULL"
cc = f"'{kwargs.get('cc')}'" if kwargs.get('cc') else "NULL"
city = kwargs.get("city").replace("'", "''") if kwargs.get("city") else None
city = f"'{city}'" if city else "NULL"
state = kwargs.get("state").replace("'", "''") if kwargs.get("state") else None
state = f"'{state}'" if state else "NULL"
country = kwargs.get("country").replace("'", "''") if kwargs.get("country") else None
country = f"'{country}'" if country else "NULL"
latitude = f'{kwargs.get("latitude")}' if kwargs.get("latitude") else "NULL"
longitude = f'{kwargs.get("longitude")}' if kwargs.get("longitude") else "NULL"
sql_statement = (f"INSERT INTO foursquare_venues VALUES" +
f" ('{venue_id}', {name}, {url}, {address}, {postal_code}, {cc}, {city}, {state}, {country}, " +
f"{latitude}, {longitude})")
cursor.execute(sql_statement)
cnx.commit()
close_connection(cnx)
def get_foursquare_venue(venue_id):
sql_query = f"SELECT * FROM foursquare_venues WHERE venueid = '{venue_id}'"
return get_results_for_query(sql_query)
def get_results_for_query(sql_query):
print(f"Fetching results for query:\n{sql_query}")
cnx = create_connection("social")
cursor = cnx.cursor()
cursor.execute(sql_query)
results = cursor.fetchall()
result_list = list()
for row in results:
result_dict = dict()
for i in range(0, len(row)):
result_dict[cursor.column_names[i]] = row[i]
result_list.append(result_dict)
close_connection(cnx)
print(f"Returning {len(result_list)} result(s)")
return result_list
def close_connection(cnx):
return cnx.close()
|
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from . import views as account_management_views
from .forms import (
FantasySportsAuthenticationForm,
FantasySportsChangePasswordForm,
FantasySportsPasswordResetForm,
FantasySportsSetPasswordForm,
)
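# Note: django.conf.urls.url and the function-based auth views referenced below (auth_views.login,
# auth_views.logout, password_change, ...) belong to pre-2.x Django; later releases replace them with
# django.urls.re_path and class-based views such as auth_views.LoginView.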
urlpatterns = [
url(
'^create/$',
account_management_views.CreateAccount.as_view(),
name='create',
),
url(
'^login/$',
auth_views.login,
{'template_name': 'account_management/login.html',
'authentication_form': FantasySportsAuthenticationForm,
'redirect_authenticated_user': True, },
name='login',
),
url(
'^logout/$',
auth_views.logout,
{'template_name': 'account_management/logout.html', },
name='logout',
),
url(
'^change_password/$',
auth_views.password_change,
{'template_name': 'account_management/change_password.html',
'post_change_redirect': 'account_management:change_password_complete',
'password_change_form': FantasySportsChangePasswordForm, },
name='change_password',
),
url(
'^change_password/complete/$',
auth_views.password_change_done,
{'template_name': 'account_management/change_password_complete.html', },
name='change_password_complete',
),
url(
'^password_reset/$',
auth_views.password_reset,
{'template_name': 'account_management/password_reset.html',
'email_template_name': 'account_management/password_reset.email',
'post_reset_redirect': 'account_management:password_reset_sent',
'password_reset_form': FantasySportsPasswordResetForm, },
name='password_reset',
),
url(
'^password_reset/sent/$',
auth_views.password_reset_done,
{'template_name': 'account_management/password_reset_sent.html', },
name='password_reset_sent',
),
url(
r'^password_reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
auth_views.password_reset_confirm,
{'template_name': 'account_management/password_reset_change_password.html',
'set_password_form': FantasySportsSetPasswordForm,
'post_reset_redirect': 'account_management:password_reset_complete', },
name='password_reset_confirm',
),
url(
'^password_reset/complete/$',
auth_views.password_reset_complete,
{'template_name': 'account_management/password_reset_complete.html', },
name='password_reset_complete',
),
]
|
from shared import OPAYGOShared
from shared_extended import OPAYGOSharedExtended
class OPAYGODecoder(object):
MAX_TOKEN_JUMP = 64
MAX_TOKEN_JUMP_COUNTER_SYNC = 100
MAX_UNUSED_OLDER_TOKENS = 8*2
@classmethod
def get_activation_value_count_and_type_from_token(cls, token, starting_code, key, last_count,
restricted_digit_set=False, used_counts=None):
if restricted_digit_set:
token = OPAYGOShared.convert_from_4_digit_token(token)
valid_older_token = False
token_base = OPAYGOShared.get_token_base(token) # We get the base of the token
current_code = OPAYGOShared.put_base_in_token(starting_code, token_base) # We put it into the starting code
starting_code_base = OPAYGOShared.get_token_base(starting_code) # We get the base of the starting code
value = cls._decode_base(starting_code_base, token_base) # If there is a match we get the value from the token
# We try all combinations up until last_count + TOKEN_JUMP, or up to the larger jump if syncing the counter
# We could start the loop directly at the last count if we kept the token value for the last count
if value == OPAYGOShared.COUNTER_SYNC_VALUE:
max_count_try = last_count + cls.MAX_TOKEN_JUMP_COUNTER_SYNC + 1
else:
max_count_try = last_count + cls.MAX_TOKEN_JUMP + 1
for count in range(0, max_count_try):
masked_token = OPAYGOShared.put_base_in_token(current_code, token_base)
if count % 2:
type = OPAYGOShared.TOKEN_TYPE_SET_TIME
else:
type = OPAYGOShared.TOKEN_TYPE_ADD_TIME
if masked_token == token:
if cls._count_is_valid(count, last_count, value, type, used_counts):
return value, count, type
else:
valid_older_token = True
current_code = OPAYGOShared.generate_next_token(current_code, key) # If not we go to the next token
if valid_older_token:
return -2, None, None
return None, None, None
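# Return convention: (value, count, type) when a matching, usable token is found;
# (-2, None, None) when the token matches but is older or already used;
# (None, None, None) when no match is found at all.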
@classmethod
def _count_is_valid(cls, count, last_count, value, type, used_counts):
if value == OPAYGOShared.COUNTER_SYNC_VALUE:
if count > last_count - 30:
return True
elif count > last_count:
return True
elif cls.MAX_UNUSED_OLDER_TOKENS > 0:
if count > last_count - cls.MAX_UNUSED_OLDER_TOKENS:
if count not in used_counts and type == OPAYGOShared.TOKEN_TYPE_ADD_TIME:
return True
return False
@classmethod
def update_used_counts(cls, past_used_counts, value, new_count, type):
highest_count = max(past_used_counts) if past_used_counts else 0
if new_count > highest_count:
highest_count = new_count
bottom_range = highest_count-cls.MAX_UNUSED_OLDER_TOKENS
used_counts = []
if type != OPAYGOShared.TOKEN_TYPE_ADD_TIME or value == OPAYGOShared.COUNTER_SYNC_VALUE or value == OPAYGOShared.PAYG_DISABLE_VALUE:
# If it is not an Add-Time token, we mark all the past tokens as used in the range
for count in range(bottom_range, highest_count+1):
used_counts.append(count)
else:
# If it is an Add-Time token, we just mark the tokens actually used in the range
for count in range(bottom_range, highest_count+1):
if count == new_count or count in past_used_counts:
used_counts.append(count)
return used_counts
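# Illustrative example: with MAX_UNUSED_OLDER_TOKENS = 16, past_used_counts = [10, 12],
# an ordinary Add-Time token and new_count = 14, the returned list is [10, 12, 14]
# (only the counts actually used inside the tracked window are kept).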
@classmethod
def _decode_base(cls, starting_code_base, token_base):
decoded_value = token_base - starting_code_base
if decoded_value < 0:
return decoded_value + 1000
else:
return decoded_value
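# Illustrative example: starting_code_base = 950 and token_base = 10 decode to (10 - 950) + 1000 = 60.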
@classmethod
def get_activation_value_count_from_extended_token(cls, token, starting_code, key, last_count,
restricted_digit_set=False):
if restricted_digit_set:
token = OPAYGOSharedExtended.convert_from_4_digit_token(token)
token_base = OPAYGOSharedExtended.get_token_base(token) # We get the base of the token
current_code = OPAYGOSharedExtended.put_base_in_token(starting_code, token_base) # We put it into the starting code
starting_code_base = OPAYGOSharedExtended.get_token_base(starting_code) # We get the base of the starting code
value = cls._decode_base_extended(starting_code_base, token_base) # If there is a match we get the value from the token
for count in range(0, 30):
masked_token = OPAYGOSharedExtended.put_base_in_token(current_code, token_base)
if masked_token == token and count > last_count:
clean_count = count-1
return value, clean_count
current_code = OPAYGOSharedExtended.generate_next_token(current_code, key) # If not we go to the next token
return None, None
@classmethod
def _decode_base_extended(cls, starting_code_base, token_base):
decoded_value = token_base - starting_code_base
if decoded_value < 0:
return decoded_value + 1000000
else:
return decoded_value
|
# -*- coding: utf-8 -*-
def test_cub_download():
from africanus.util.cub import cub_dir
cub_dir()
|
import pandas as pd
import csv
filepath = "../data/merged/watersheds_drainage_within_5percent.csv"
watershed_stats_df = pd.read_csv("../data/scrape_results/watershed_stats.csv")
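# Keep only watersheds whose drainage_area is within 5% of drainage_area_gross.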
indexNames = watershed_stats_df[((watershed_stats_df['drainage_area'] / watershed_stats_df['drainage_area_gross']) - 1).abs() <= 0.05].index
print(len(watershed_stats_df))
print(len(indexNames))
drainage_area_diff = (watershed_stats_df['drainage_area'] / watershed_stats_df['drainage_area_gross'])
print((drainage_area_diff - 1))
# Select the filtered rows by label and work on a copy to avoid chained-assignment warnings.
watersheds_with_5percent_diff = watershed_stats_df.loc[indexNames].copy()
watersheds_with_5percent_diff['mean'] = (watersheds_with_5percent_diff['mean'] / watersheds_with_5percent_diff['drainage_area']) * 1000
print(watersheds_with_5percent_diff[['average_slope', 'mean', 'most_recent_year', 'drainage_area', 'drainage_area_gross', 'latitude', 'longitude']])
# export to file
# watersheds_with_5percent_diff.to_csv(filepath, index=False, header=True)
# for zone, rows in watershed_stats_df.groupby('hydrological_zone'):
# print(rows['aspect'])
|
# # def get_equipment_that_has_offensive_bonuses(player_stats, ignore, equipment_data=get_equipment_data()):
# # e = defaultdict(list)
# # for slot, slot_equipment in equipment_data.items():
# # for ID, equipment in slot_equipment.items():
# # if equipment['name'] in ignore:
# # continue
# # if any([equipment['name'].startswith(s) for s in ('Corrupted', 'Zuriel', 'Statius', 'Vesta')]):
# # continue
# # if any(equipment['equipment'][stat] > 0 for stat in ('attack_stab', 'attack_crush', 'attack_slash', 'attack_ranged', 'attack_magic', 'ranged_strength', 'magic_damage', 'melee_strength')):
# # e[slot].append(equipment)
# # return e
# from jsoncomment import JsonComment
# from osrsmath.apps.optimize import get_sets, eval_set, load, get_best_set
# from osrsmath.combat.fighter import get_equipment_data
# from osrsmath.combat.experience import combat_level
# from osrsmath.combat.monsters import Monster
# from osrsmath.combat.boosts import BoostingSchemes, Prayers, Potions
# from pprint import pprint
# import sys
# import os
# from jsoncomment import JsonComment
# from osrsmath.combat.monsters import Monster, get_monster_data
# from osrsmath.combat.fighter import PlayerBuilder, get_equipment_data, get_equipment_by_name
# from osrsmath.combat.rates import experience_per_hour
# from osrsmath.combat.experience import combat_level, xp_rate
# from osrsmath.combat.boosts import BoostingSchemes
# from osrsmath.combat import successful_hits
# from collections import defaultdict
# from pprint import pprint
# import osrsmath.apps.nmz as nmz
# import numpy as np
# import copy
# weapon_type = 'weapon'
# def get_stances_that_can_train(weapon, skill, allow_shared=True):
# """ Returns the stances (by combat_style) that the {weapon} can train the {skill} with. """
# stances = []
# for stance in weapon['weapon']['stances']:
# s = stance['experience']
# # Stances come in 4 flavors:
# # 1) attack, strength, defence, magic, ranged
# # 2) "magic and defence", "ranged and defence"
# # 3) none
# # 4) shared
# # The first 2 can be identified with the '==' or 'in' operators, respectively.
# # The 3rd is simply ignored. Since the 2nd accounts for both magic and ranged,
# # 'shared' is specifically melee.
# assert s in ('attack', 'strength', 'defence', 'magic', 'ranged', 'magic and defence', 'ranged and defence', 'none', 'shared'), s
# if skill == s:
# stances.append(stance['combat_style'])
# elif allow_shared:
# if skill in s:
# stances.append(stance['combat_style'])
# elif s == 'shared':
# assert stance['attack_type'] in ('stab', 'crush', 'slash'), stance['attack_type']
# if skill in ('attack', 'strength', 'defence'):
# stances.append(stance['combat_style'])
# return stances
# def get_attack_types_that_train(weapon, skill, allow_shared=True):
# stances_dict = {s['combat_style']: s for s in weapon['weapon']['stances']}
# stances = get_stances_that_can_train(weapon, skill, allow_shared)
# possible_attack_types = set()
# for stance in stances:
# stance = stances_dict[stance]
# if 'magic' in stance['experience']:
# possible_attack_types.add('attack_magic')
# elif 'ranged' in stance['experience']:
# possible_attack_types.add('attack_ranged')
# else:
# possible_attack_types.add('attack_' + stance['attack_type'])
# return possible_attack_types
# def get_weapons_that_can_train(skill, allow_shared=True, equipment_data=get_equipment_data()):
# assert skill in ('attack', 'strength', 'defence', 'magic', 'ranged')
# weapons = []
# for ID, weapon in list(equipment_data[weapon_type].items()):
# assert weapon['equipable_by_player'] and weapon['weapon']
# if get_stances_that_can_train(weapon, skill, allow_shared):
# weapons.append(weapon)
# return weapons
# def get_weapons_that_have_training_bonuses(skill, allow_shared=True, equipment=get_equipment_data()):
# weapons = []
# for weapon in get_weapons_that_can_train(skill, allow_shared, equipment):
# possible_attack_types = get_attack_types_that_train(weapon, skill, allow_shared)
# assert possible_attack_types
# # Could also include weapon['weapon']['attack_speed'] < 4 for weapons faster than unarmed.
# # but this would only be additionally added if they have no relevant bonuses.
# if any(weapon['equipment'][attack_type] > 0 for attack_type in possible_attack_types):
# weapons.append(weapon)
# return weapons
# def meets_requirements(player_stats, equipment):
# if equipment['equipment']['requirements'] is None:
# return True
# for stat, req in equipment['equipment']['requirements'].items():
# if stat not in player_stats:
# raise ValueError(f"Supply your {stat} level to check {equipment['name']} for: {equipment['equipment']['requirements']}")
# if player_stats[stat] < req:
# return False
# return True
# def get_equipable(equipment, player_stats, ignore, adjustments):
# equipable = []
# for eq in equipment:
# if eq['name'] in ignore:
# continue
# if any([eq['name'].startswith(s) for s in ('Corrupted', 'Zuriel', 'Statius', 'Vesta')]):
# continue
# if eq['name'] in adjustments:
# eq['equipment']['requirements'] = adjustments[eq['name']]
# if meets_requirements(player_stats, eq):
# equipable.append(eq)
# return equipable
# def is_better(A, B):
# """ Is A absolutely better than B?
# @param A Equipment stats eg: {'attack_crush': 53, ...}
# @param B Equipment stats """
# assert list(A.keys()) == list(B.keys())
# if list(A.values()) == list(B.values()):
# return False
# for a, b in zip(list(A.values()), list(B.values())):
# if b > a:
# return False
# return True
# def reduce_bonuses(equipment, attack_type):
# bonuses = [attack_type]
# if 'magic' in attack_type:
# bonuses.append('magic_damage')
# elif 'ranged' in attack_type:
# bonuses.append('ranged_strength')
# else:
# bonuses.append('melee_strength')
# reduced = {b: v for b, v in equipment['equipment'].items() if b in bonuses}
# reduced['reciprocal_attack_speed'] = 1/equipment['weapon']['attack_speed']
# return reduced
# def get_best_options(skill, player_stats, ignore, adjustments, allow_shared=True):
# weapons = get_equipable(get_weapons_that_have_training_bonuses(skill, allow_shared), player_stats, ignore, adjustments)
# # Sort equipment by attack bonus required to train
# weapons_by_attack_type = defaultdict(list)
# for weapon in weapons:
# for attack_type in get_attack_types_that_train(weapon, skill, allow_shared):
# weapons_by_attack_type[attack_type].append(weapon)
# # Then only select the strictly better set of those.
# best_by_attack_type = defaultdict(list)
# for attack_type, weapons in weapons_by_attack_type.items():
# for weapon in weapons:
# # So long as not everyone is better than you.
# if all([not is_better(
# reduce_bonuses(w, attack_type),
# reduce_bonuses(weapon, attack_type)
# ) for w in weapons]) and ( # And your stats aren't already included
# reduce_bonuses(weapon, attack_type) not in [
# reduce_bonuses(e, attack_type) for e in best_by_attack_type[attack_type]
# ]
# ):
# best_by_attack_type[attack_type].append(weapon)
# return best_by_attack_type
# if __name__ == '__main__':
# player_stats, defenders, ignore, adjustments = load("settings.json")
# player_stats.update({
# 'attack': 60,
# 'strength': 60,
# 'defence': 50,
# 'hitpoints': 50,
# 'magic': 50,
# 'ranged': 50,
# 'prayer': 43
# })
# player_stats.update({'cmb': combat_level(player_stats)})
# ignore.extend([
# 'Amulet of torture',
# 'Amulet of torture (or)',
# 'Fighter torso',
# 'Fire cape',
# ])
# # e = get_equipable(get_weapons_that_have_training_bonuses('attack', True), player_stats, ignore, adjustments)
# e = get_best_options('defence', player_stats, ignore, adjustments, allow_shared=False)
# pprint([(e, [w['name'] for w in ws]) for e, ws in e.items()])
# # print(len(e))
# # I now have a function that will give me the best weapons possible for a given attack_type for training something
# # So "train attack using slash" gives me a weapon set.
# # For each attack_type, I can try on the equipment that gives the best bonuses.
# # ie for attack_stab, what helmets give the best bonus? (considering that str might also go up)
# # At the end of the day, the user should be able to click on a suggest, and say "what are alternatives"
# # best_set = get_best_set(player_stats, 'attack',
# # lambda p: BoostingSchemes(p, Prayers.none).constant(Potions.overload),
# # defenders, get_sets(player_stats, defenders, ignore, adjustments), include_shared_xp=True
# # )
# # pprint(best_set)
|
import logging
from typing import Any, Dict, Optional, Tuple
import lumos.numpy as lnp
from lumos.models.base import model_io, ModelReturn
from lumos.models.tires.base import BaseTire
logger = logging.getLogger(__name__)
@model_io(
inputs=(
"Fz", # vertical load
"kappa", # slip ratio
"alpha", # slip angle
"vx", # x-velocity in tire coordinate
"gamma", # inclination angle
),
outputs=("Fx", "Fy", "Mx", "My", "Mz", "Kxk", "Gxa", "Kya", "Gyk", "GSum"),
)
class MF52(BaseTire):
def __init__(
self,
model_config: Dict[str, Any] = {},
params: Dict[str, Any] = {},
opt_mode: bool = True,
):
"""MF5.2 Pacjeka magic formula
Args:
params (Dict[str, Any], optional): model parameters. Defaults to None.
opt_mode (bool, optional): use optimization mode if True. Defaults to True.
Currently _opt_mode is used to make an optimization variant of the model, which
includes the following modifications:
- no calculation of moments (to speed up compile time for experiments, until the
vehicle model is ready)
Reference implementation in:
- doc: https://mfeval.wordpress.com
- code: https://de.mathworks.com/matlabcentral/fileexchange/63618-mfeval
The MF5.2 is a steady-state model which also doesn't contain effects such as:
- effects from inflation pressure
- some secondary effects from camber angle
- speed effect on grip
Also the variant implemented here ignores turn slip.
ISO-W (TYDEX W) Contact-Patch Axis System coordinate system is used in
all calculations, see page 29 of
https://functionbay.com/documentation/onlinehelp/Documents/Tire/MFTire-MFSwift_Help.pdf
params refers to a MF-Tire tire property file (.TIR) containing
all the Magic Formula coefficients or to a structure with all the parameters.
All units are SI.
"""
super().__init__(model_config=model_config, params=params)
self._opt_mode = opt_mode
@classmethod
def get_default_params(cls) -> Dict[str, Any]:
# default pacejka parameters from data/tires/default.tir
params = {
"FILE_VERSION": 3.0,
"VXLOW": 1.0,
"LONGVL": 15.0,
"UNLOADED_RADIUS": 0.4,
"WIDTH": 0.235,
"RIM_RADIUS": 0.2032,
"RIM_WIDTH": 0.1778,
"VERTICAL_STIFFNESS": 463000.0,
"VERTICAL_DAMPING": 500.0,
"BREFF": 8.4,
"DREFF": 0.27,
"FREFF": 0.07,
"FNOMIN": 9416.6,
"KPUMIN": -1.5,
"KPUMAX": 1.5,
"ALPMIN": -1.5,
"ALPMAX": 1.5,
"CAMMIN": -0.2634,
"CAMMAX": 0.26395,
"FZMIN": 100.0,
"FZMAX": 20469.8,
"LFZO": 1.0,
"LCX": 1.0,
"LMUX": 1.0,
"LEX": 1.0,
"LKX": 1.0,
"LHX": 1.0,
"LVX": 1.0,
"LGAX": 1.0,
"LCY": 1.0,
"LMUY": 1.0,
"LEY": 1.0,
"LKY": 1.0,
"LHY": 1.0,
"LVY": 1.0,
"LGAY": 1.0,
"LTR": 1.0,
"LRES": 1.0,
"LGAZ": 1.0,
"LXAL": 1.0,
"LYKA": 1.0,
"LVYKA": 1.0,
"LS": 1.0,
"LSGKP": 1.0,
"LSGAL": 1.0,
"LGYR": 1.0,
"LMX": 1.0,
"LVMX": 1.0,
"LMY": 1.0,
"PCX1": 1.3605,
"PDX1": 0.885,
"PDX2": -0.05653,
"PDX3": -8.768,
"PEX1": -0.13155,
"PEX2": -0.475,
"PEX3": -1.3126,
"PEX4": -1.685,
"PKX1": 16.675,
"PKX2": -0.04552,
"PKX3": 0.4205,
"PHX1": 8.731e-05,
"PHX2": -0.0011733,
"PVX1": 0.01913,
"PVX2": 0.02654,
"RBX1": 9.509,
"RBX2": -9.51,
"RCX1": 1.0061,
"REX1": -1.9815,
"REX2": -4.552,
"RHX1": 0.010039,
"PTX1": 1.1898,
"PTX2": -0.0006148,
"PTX3": -0.3717,
"QSX1": -0.003093,
"QSX2": 0.6038,
"QSX3": 0.025405,
"PCY1": 1.889,
"PDY1": 0.8271,
"PDY2": -0.2306,
"PDY3": -7.088,
"PEY1": 0.4585,
"PEY2": -0.21823,
"PEY3": -0.3872,
"PEY4": 3.96,
"PKY1": -29.1,
"PKY2": 7.576,
"PKY3": 0.944,
"PHY1": 0.004476,
"PHY2": 0.0025996,
"PHY3": 0.006973,
"PVY1": 0.005978,
"PVY2": 0.008146,
"PVY3": -0.4066,
"PVY4": -0.4417,
"RBY1": 5.042,
"RBY2": 4.858,
"RBY3": 0.06116,
"RCY1": 1.0642,
"REY1": -0.9207,
"REY2": -0.4334,
"RHY1": -0.0009546,
"RHY2": 3.506e-05,
"RVY1": -0.02541,
"RVY2": 0.02259,
"RVY3": -0.9322,
"RVY4": 21.16,
"RVY5": 1.9,
"RVY6": -15.552,
"PTY1": 1.6245,
"PTY2": 2.8506,
"QSY1": 0.01,
"QSY2": 0.0,
"QSY3": 0.0,
"QSY4": 0.0,
"QBZ1": 6.035,
"QBZ2": -0.8902,
"QBZ3": 1.1858,
"QBZ4": 0.0887,
"QBZ5": -0.2598,
"QBZ9": 7.32,
"QBZ10": 0.0,
"QCZ1": 1.6637,
"QDZ1": 0.07618,
"QDZ2": -0.02726,
"QDZ3": 0.5106,
"QDZ4": 27.5,
"QDZ6": -0.0012362,
"QDZ7": -0.003485,
"QDZ8": -0.10836,
"QDZ9": -0.03984,
"QEZ1": -0.04218,
"QEZ2": 0.002656,
"QEZ3": 0.0,
"QEZ4": 1.3648,
"QEZ5": -99.3,
"QHZ1": 0.019396,
"QHZ2": -0.0017907,
"QHZ3": 0.09319,
"QHZ4": -0.03752,
"SSZ1": 0.009003,
"SSZ2": -0.011339,
"SSZ3": 0.6903,
"SSZ4": -0.7046,
"QTZ1": 0.0,
"MBELT": 0.0,
"epsilonv": 1e-06,
"epsilonx": 0.001,
"epsilonk": 1e-06,
"epsilony": 0.001,
}
return params
def forward(self, inputs: lnp.ndarray) -> lnp.ndarray:
"""Pacejka MF5.2 tire model
        There are essentially two sources of equations:
        - from the paper, marked with A (e.g. A56)
        - from the book, equations from page 184 onwards (4.3.2 Full Set of Equations),
          marked with E4.xx, e.g. E4.43
Book:
Title: Tire and Vehicle Dynamics
Author: Hans Pacejka
Edition: 3, revised
https://www.elsevier.com/books/tire-and-vehicle-dynamics/pacejka/978-0-08-097016-5
Paper:
Besselink, I. J. M. , Schmeitz, A. J. C. and Pacejka, H. B.(2010)
‘An improved Magic Formula/Swift tyre model that can handle inflation pressure changes’
Vehicle System Dynamics, 48: 1, 337 — 352
http://dx.doi.org/10.1080/00423111003748088
"""
# Unpack inputs
gamma = inputs["gamma"]
vx = inputs["vx"]
Fz = inputs["Fz"]
kappa = inputs["kappa"]
alpha = inputs["alpha"]
(Fx, Fy, Mx, My, Mz, Kxk, Gxa, Kya, Gyk) = self._do_force_and_moments(
kappa=kappa, alpha=alpha, gamma=gamma, vx=vx, Fz=Fz,
)
outputs = dict(
Fx=Fx,
Fy=Fy,
Mx=Mx,
My=My,
Mz=Mz,
Kxk=Kxk,
Gxa=Gxa,
Kya=Kya,
Gyk=Gyk,
GSum=lnp.sqrt(Gyk ** 2 + Gxa ** 2),
)
return ModelReturn(outputs=outputs)
def _do_force_and_moments(
self, kappa: float, alpha: float, gamma: float, vx: float, Fz: float,
) -> lnp.ndarray:
"""Top level computation
See MFeval Solver.doForcesAndMoments
"""
starVar, primeVar, incrVar = self._calculate_basic(
kappa=kappa, alpha=alpha, Vcx=vx, gamma=gamma, Fz=Fz,
)
Fx0, mux, Kxk = self._calculate_Fx0(
kappa=kappa,
gamma=gamma,
Fz=Fz,
starVar=starVar,
primeVar=primeVar,
incrVar=incrVar,
)
Fx, Gxa = self._calculate_Fx(
kappa=kappa, starVar=starVar, incrVar=incrVar, Fx0=Fx0,
)
Fy0, muy, Kya, SHy, SVy, By, Cy = self._calculate_Fy0(
Fz=Fz, starVar=starVar, primeVar=primeVar, incrVar=incrVar
)
        Fy, Gyk, SVyk = self._calculate_Fy(
Fz=Fz, kappa=kappa, Fy0=Fy0, muy=muy, starVar=starVar, incrVar=incrVar,
)
if self._opt_mode:
            # Skip computation of moments
Mx = My = Mz = 0.0
else:
# TODO: clean this up
lat_coeff = {
"Kya": Kya,
"SHy": SHy,
"SVy": SVy,
"SVyk": SVyk,
"By": By,
"Cy": Cy,
}
Mx = self._calculate_Mx(gamma=gamma, Fz=Fz, Fy=Fy)
My = self._calculate_My(Fz=Fz, vx=vx, Fx=Fx)
            # NOTE: we combined the calculateMz0 and calculateMz from the original function
            # as the Mz0 value is not needed in MF5.2 (but a few intermediate coefficients
            # are needed).
Mz = self._calculate_Mz(
kappa=kappa,
alpha=alpha,
gamma=gamma,
Fz=Fz,
vx=vx,
Fx=Fx,
Kxk=Kxk,
Fy=Fy,
lat_coeff=lat_coeff,
starVar=starVar,
primeVar=primeVar,
incrVar=incrVar,
)
return (Fx, Fy, Mx, My, Mz, Kxk, Gxa, Kya, Gyk)
def _calculate_basic(
self, kappa: float, alpha: float, Vcx: float, gamma: float, Fz: float,
    ) -> Tuple[Dict[str, float], Dict[str, float], Dict[str, float]]:
"""Corresponds to Solver.calculateBasic in mfeval
Note:
This implementation ignores the potential use of tan(alpha) at very large slip
angles and possibly backwards running of the wheel, Eqn (4.E3-4.E4) p.177 of Book
This is called alpha_star in MFeval
"""
# TODO: there are things we could simplify here:
# 1) speed dependent grip: LMUV does not exist in MF5.2, so hard coded to 0,
# which means we could get rid of related computation
# 2) pressure effect: does not exist in MF5.2, so we could get rid of dpi.
# Unpack Parameters
Fz0 = self._params["FNOMIN"] # Nominal wheel load
LFZO = self._params["LFZO"] # Scale factor of nominal (rated) load
LMUX = self._params["LMUX"] # Scale factor of Fx peak friction coefficient
LMUY = self._params["LMUY"] # Scale factor of Fy peak friction coefficient
epsilonv = self._params["epsilonv"]
# Slip velocities in point S (slip point)
Vsy = (
lnp.tan(alpha) * Vcx
) # Assumption page 67 of book, paragraph above Eqn (2.11)
# Velocities in point C (contact)
Vcy = Vsy
# TODO: sqrt singularity here
Vc = lnp.sqrt(Vcx ** 2 + Vcy ** 2) # Velocity of wheel contact centre C,
# Not described in the book but is the same as [Eqn (3.39) Page 102 - Book]
# Effect of having a tire with a different nominal load
Fz0_prime = LFZO * Fz0 # [Eqn (4.E1) Page 177 - Book]
# Normalized change in vertical load
dfz = (Fz - Fz0_prime) / Fz0_prime # [Eqn (4.E2a) Page 177 - Book]
# NOTE: we do not use alpha_star mode from MFeval
alpha_star = alpha
gamma_star = gamma
# For the aligning torque at high slip angles
# NOTE: we omit the sign(Vc) part as we assume Vc is always positive.
Vc_prime = Vc + epsilonv # [Eqn (4.E6a) Page 178 - Book]
alpha_prime = lnp.arccos(Vcx / Vc_prime) # [Eqn (4.E6) Page 177 - Book]
        # NOTE: here we ignore mu scaling with changing slip speed as it is not
        # included in MF5.2 (effect set to 0)
LMUX_star = LMUX # [Eqn (4.E7) Page 179 - Book] without slip speed effect
LMUY_star = LMUY # [Eqn (4.E7) Page 179 - Book] without slip speed effect
# Degressive friction factor
        # On Page 179 of the book Amu = 10 is suggested, but after comparing the use of
        # the scaling factors against TNO, Amu = 1 gave a perfect match
Amu = 1
# [Eqn (4.E8) Page 179 - Book]
LMUX_prime = Amu * LMUX_star / (1 + (Amu - 1) * LMUX_star)
LMUY_prime = Amu * LMUY_star / (1 + (Amu - 1) * LMUY_star)
# Pack outputs
starVar = {
"alpha_star": alpha_star,
"gamma_star": gamma_star,
"LMUX_star": LMUX_star,
"LMUY_star": LMUY_star,
}
primeVar = {
"Fz0_prime": Fz0_prime,
"alpha_prime": alpha_prime,
"LMUX_prime": LMUX_prime,
"LMUY_prime": LMUY_prime,
}
incrVar = {"dfz": dfz}
return starVar, primeVar, incrVar
def _calculate_Fx0(self, kappa, gamma, Fz, starVar, primeVar, incrVar):
"""Calculate the longitudinal tire force for pure longitudinal slips.
See MFEval Solver.calculateFx0
"""
# [SCALING_COEFFICIENTS]
LCX = self._params["LCX"] # Scale factor of Fx shape factor
LEX = self._params["LEX"] # Scale factor of Fx curvature factor
LKX = self._params["LKX"] # Scale factor of Fx slip stiffness
LHX = self._params["LHX"] # Scale factor of Fx horizontal shift
LVX = self._params["LVX"] # Scale factor of Fx vertical shift
# [LONGITUDINAL_COEFFICIENTS]
PCX1 = self._params["PCX1"] # Shape factor Cfx for longitudinal force
PDX1 = self._params["PDX1"] # Longitudinal friction Mux at Fznom
PDX2 = self._params["PDX2"] # Variation of friction Mux with load
PDX3 = self._params["PDX3"] # Variation of friction Mux with camber squared
PEX1 = self._params["PEX1"] # Longitudinal curvature Efx at Fznom
PEX2 = self._params["PEX2"] # Variation of curvature Efx with load
PEX3 = self._params["PEX3"] # Variation of curvature Efx with load squared
PEX4 = self._params["PEX4"] # Factor in curvature Efx while driving
PKX1 = self._params["PKX1"] # Longitudinal slip stiffness Kfx/Fz at Fznom
PKX2 = self._params["PKX2"] # Variation of slip stiffness Kfx/Fz with load
PKX3 = self._params["PKX3"] # Exponent in slip stiffness Kfx/Fz with load
PHX1 = self._params["PHX1"] # Horizontal shift Shx at Fznom
PHX2 = self._params["PHX2"] # Variation of shift Shx with load
PVX1 = self._params["PVX1"] # Vertical shift Svx/Fz at Fznom
PVX2 = self._params["PVX2"] # Variation of shift Svx/Fz with load
epsilonx = self._params["epsilonx"]
dfz = incrVar["dfz"]
LMUX_star = starVar["LMUX_star"]
LMUX_prime = primeVar["LMUX_prime"]
# NOTE: we ignore turn slip
zeta1 = 1.0
# Longitudinal force (pure longitudinal slip, alpha = 0)
Cx = PCX1 * LCX # (> 0) (4.E11)
# NOTE: here for mux and Kxk we ignore inflation pressure effects which are only
# in MF6.1 (PPX1, 2, 3, 4...)
mux = (PDX1 + PDX2 * dfz) * (1 - PDX3 * gamma ** 2) * LMUX_star # (4.E13)
# TODO: here we don't ensure Dx is not -ve. But neither does MFeval
# (-ve Dx should only happen when Fz is -ve or if mux is -ve, as zeta1=1.0)
Dx = mux * Fz * zeta1 # (> 0) (4.E12)
Kxk = (
Fz * (PKX1 + PKX2 * dfz) * lnp.exp(PKX3 * dfz) * LKX
) # (= BxCxDx = dFxo./dkx at kappax = 0) (= Cfk) (4.E15)
        # NOTE: here we ignore the sign(Dx) part multiplied by epsilonx.
Bx = Kxk / (
Cx * Dx + epsilonx * lnp.sign(Dx)
) # (4.E16) [sign(Dx) term explained on page 177]
SHx = (PHX1 + PHX2 * dfz) * LHX # (4.E17)
SVx = Fz * (PVX1 + PVX2 * dfz) * LVX * LMUX_prime * zeta1 # (4.E18)
# NOTE: here we ignore the low speed model part in MFeval
kappax = kappa + SHx # (4.E10)
# NOTE: here we ignore the linear transient part in MFeval
# Ex is a function of sign(kappa)
# TODO: here we have a discontinous function lnp.sign
# jax: jacobian(jnp.sign)(0.0) -> 0.0, so always 0.0 derivative
Ex = (
(PEX1 + PEX2 * dfz + PEX3 * dfz ** 2) * (1 - PEX4 * lnp.sign(kappax)) * LEX
) # (<=1) (4.E14)
        # NOTE: Ex needs to be <= 1; MFeval only enforces this if useLimitsCheck == True,
        # here we clamp it unconditionally below.
Ex = lnp.minimum(Ex, 1.0)
# Pure longitudinal force
Bx_kappax = Bx * kappax
Fx0 = (
Dx
* lnp.sin(
Cx * lnp.arctan(Bx_kappax - Ex * (Bx_kappax - lnp.arctan(Bx_kappax)))
)
+ SVx
) # (4.E9)
return Fx0, mux, Kxk
def _calculate_Fx(self, kappa, starVar, incrVar, Fx0):
"""Combined slip modifier on top of pure longitudinal slip force.
See MFeval Solver.calculateFx
"""
# Longitudinal force (combined slip)
alpha_star = starVar["alpha_star"]
gamma_star = starVar["gamma_star"]
dfz = incrVar["dfz"]
# [SCALING_COEFFICIENTS]
LXAL = self._params["LXAL"] # Scale factor of alpha influence on Fx
# [LONGITUDINAL_COEFFICIENTS]
RBX1 = self._params["RBX1"] # Slope factor for combined slip Fx reduction
RBX2 = self._params["RBX2"] # Variation of slope Fx reduction with kappa
RCX1 = self._params["RCX1"] # Shape factor for combined slip Fx reduction
REX1 = self._params["REX1"] # Curvature factor of combined Fx
REX2 = self._params["REX2"] # Curvature factor of combined Fx with load
RHX1 = self._params["RHX1"] # Shift factor for combined slip Fx reduction
Cxa = RCX1 # (4.E55)
# TODO: here we ignore the limit check on Exa <=1
# MFeval checks it when useLimitsCheck == True
Exa = REX1 + REX2 * dfz # (<= 1) (4.E56)
Exa = lnp.minimum(Exa, 1.0)
SHxa = RHX1 # (4.E57)
# NOTE: here RBX3 effect is ignored as it's only from MF6.1 onwards.
# TODO: here we ignore the limit check for Bxa > 0 (but MFeval doesn't check it
# either)
Bxa = RBX1 * lnp.cos(lnp.arctan(RBX2 * kappa)) * LXAL # (> 0) (4.E54)
alphas = alpha_star + SHxa # (4.E53)
Bxa_SHxa = Bxa * SHxa
Gxa0 = lnp.cos(
Cxa * lnp.arctan(Bxa_SHxa - Exa * (Bxa_SHxa - lnp.arctan(Bxa_SHxa)))
) # (4.E52)
        # Gxa is a function of alpha since alphas is a function of alpha;
        # it is also a function of kappa as Bxa is a function of kappa.
        # TODO: here we ignore the limit check on Gxa > 0, which MFeval ignores as well.
Bxa_alphas = Bxa * alphas
Gxa = (
lnp.cos(
Cxa
* lnp.arctan(Bxa_alphas - Exa * (Bxa_alphas - lnp.arctan(Bxa_alphas)))
)
/ Gxa0 # (> 0)(4.E51)
)
Fx = Gxa * Fx0 # (4.E50)
return Fx, Gxa
def _calculate_Fy0(self, Fz, starVar, primeVar, incrVar):
"""Calculate the lateral tire force for pure lateral slips.
See MFeval Solver.calculateFy0
"""
# [SCALING_COEFFICIENTS]
LCY = self._params["LCY"] # Scale factor of Fy shape factor
LEY = self._params["LEY"] # Scale factor of Fy curvature factor
LKY = self._params["LKY"] # Scale factor of Fy cornering stiffness
LHY = self._params["LHY"] # Scale factor of Fy horizontal shift
LVY = self._params["LVY"] # Scale factor of Fy vertical shift
# [LATERAL_COEFFICIENTS]
PCY1 = self._params["PCY1"] # Shape factor Cfy for lateral forces
PDY1 = self._params["PDY1"] # Lateral friction Muy
PDY2 = self._params["PDY2"] # Variation of friction Muy with load
PDY3 = self._params["PDY3"] # Variation of friction Muy with squared camber
PEY1 = self._params["PEY1"] # Lateral curvature Efy at Fznom
PEY2 = self._params["PEY2"] # Variation of curvature Efy with load
PEY3 = self._params["PEY3"] # Zero order camber dependency of curvature Efy
PEY4 = self._params["PEY4"] # Variation of curvature Efy with camber
PKY1 = self._params["PKY1"] # Maximum value of stiffness Kfy./Fznom
PKY2 = self._params["PKY2"] # Load at which Kfy reaches maximum value
PKY3 = self._params["PKY3"] # Variation of Kfy./Fznom with camber
PHY1 = self._params["PHY1"] # Horizontal shift Shy at Fznom
PHY2 = self._params["PHY2"] # Variation of shift Shy with load
PHY3 = self._params["PHY3"] # Variation of shift Shy with camber
PVY1 = self._params["PVY1"] # Vertical shift in Svy./Fz at Fznom
PVY2 = self._params["PVY2"] # Variation of shift Svy./Fz with load
PVY3 = self._params["PVY3"] # Variation of shift Svy./Fz with camber
PVY4 = self._params["PVY4"] # Variation of shift Svy./Fz with camber and load
# NOTE: we only keep the branch for useTurnSlip == False
# No turn slip and small camber angles
# First paragraph on page 178 of the book
zeta0 = 1.0
zeta2 = 1.0
zeta3 = 1.0
zeta4 = 1.0
epsilony = self._params["epsilony"]
dfz = incrVar["dfz"]
LMUY_star = starVar["LMUY_star"]
alpha_star = starVar["alpha_star"]
gamma_star = starVar["gamma_star"]
LMUY_prime = primeVar["LMUY_prime"]
Fz0_prime = primeVar["Fz0_prime"]
# TODO: here we need smooth abs
abs_gamma_star = lnp.abs(gamma_star)
# NOTE: here we exclude the PPY1 factors (inflation pressure) and PKY4(curvature
# of stiffness Kya) and PKY5(peak stiffness variation with camber squared)
# which are only available in MF6.1
Kya = (
PKY1
* Fz0_prime
* (1 - PKY3 * abs_gamma_star)
* lnp.sin(2 * lnp.arctan(Fz / Fz0_prime / PKY2))
* zeta3
* LKY
) # (= ByCyDy = dFyo./dalphay at alphay = 0) (if gamma =0: =Kya0 = CFa)
# Based on (4.E25), it is equivalent to PKY4
# NOTE: no LKYC (camber force stiffness scaling factor), MF6.1 only.
SVyg = Fz * (PVY3 + PVY4 * dfz) * gamma_star * LMUY_prime * zeta2 # (4.E28)
        # NOTE: ignored Kya0 and Kyg0 computation as they are only used for turn slip
        # and MF6.1 related computations
# MF5.2 branch of MFeval, From the MF-Tyre equation manual
SHy = (PHY1 + PHY2 * dfz) * LHY + PHY3 * gamma_star
SVy = Fz * (PVY1 + PVY2 * dfz) * LVY * LMUY_prime * zeta2 + SVyg # (4.E29)
alphay = alpha_star + SHy # (4.E20)
Cy = PCY1 * LCY # (> 0) (4.E21)
# NOTE: ignore PPY4 and PPY4 effect, MF6.1 only
muy = (PDY1 + PDY2 * dfz) * (1 - PDY3 * gamma_star ** 2) * LMUY_star # (4.E23)
Dy = muy * Fz * zeta2 # (4.E22)
# TODO: smoothen sign
sign_alphay = lnp.sign(alphay)
# NOTE: ignore PEY4 and PEY5 (effects w.r.t. camber), MF6.1 only
Ey = (
(PEY1 + PEY2 * dfz) * (1 - (PEY3 + PEY4 * gamma_star) * sign_alphay) * LEY
) # (<=1)(4.E24)
# TODO: smoothen min
Ey = lnp.minimum(Ey, 1.0)
signDy = lnp.sign(Dy)
# NOTE: ignored assigning signDy[signDy==0] = 1
By = Kya / (
Cy * Dy + epsilony * signDy
) # (4.E26) [sign(Dy) term explained on page 177]
# Pure lateral force
Fy0 = (
Dy
* lnp.sin(
Cy
* lnp.arctan(By * alphay - Ey * (By * alphay - lnp.arctan(By * alphay)))
)
+ SVy
) # (4.E19)
return Fy0, muy, Kya, SHy, SVy, By, Cy
    def _calculate_Fy(self, Fz, kappa, Fy0, muy, starVar, incrVar):
"""Combined slip force on top of pure lateral slip force
See MFeval Solver.calculateFy
"""
# [SCALING_COEFFICIENTS]
LYKA = self._params["LYKA"] # Scale factor of alpha influence on Fx
LVYKA = self._params["LVYKA"] # Scale factor of kappa induced Fy
alpha_star = starVar["alpha_star"]
gamma_star = starVar["gamma_star"]
dfz = incrVar["dfz"]
# NOTE: no turn slip
zeta2 = 1.0
# [LATERAL_COEFFICIENTS]
RBY1 = self._params["RBY1"] # Slope factor for combined Fy reduction
RBY2 = self._params["RBY2"] # Variation of slope Fy reduction with alpha
RBY3 = self._params["RBY3"] # Shift term for alpha in slope Fy reduction
RCY1 = self._params["RCY1"] # Shape factor for combined Fy reduction
REY1 = self._params["REY1"] # Curvature factor of combined Fy
REY2 = self._params["REY2"] # Curvature factor of combined Fy with load
RHY1 = self._params["RHY1"] # Shift factor for combined Fy reduction
RHY2 = self._params["RHY2"] # Shift factor for combined Fy reduction with load
RVY1 = self._params["RVY1"] # Kappa induced side force Svyk/muy*Fz at Fznom
RVY2 = self._params["RVY2"] # Variation of Svyk/muy*Fz with load
RVY3 = self._params["RVY3"] # Variation of Svyk/muy*Fz with camber
RVY4 = self._params["RVY4"] # Variation of Svyk/muy*Fz with alpha
RVY5 = self._params["RVY5"] # Variation of Svyk/muy*Fz with kappa
RVY6 = self._params["RVY6"] # Variation of Svyk/muy*Fz with atan(kappa)
DVyk = (
muy
* Fz
* (RVY1 + RVY2 * dfz + RVY3 * gamma_star)
* lnp.cos(lnp.arctan(RVY4 * alpha_star))
* zeta2
) # (4.E67)
SVyk = DVyk * lnp.sin(RVY5 * lnp.arctan(RVY6 * kappa)) * LVYKA # (4.E66)
SHyk = RHY1 + RHY2 * dfz # (4.E65)
Eyk = REY1 + REY2 * dfz # (<=1) (4.E64)
# TODO: smoothen minimum func
Eyk = lnp.minimum(Eyk, 1.0)
Cyk = RCY1 # (4.E63)
Byk = (
RBY1 * lnp.cos(lnp.arctan(RBY2 * (alpha_star - RBY3))) * LYKA
) # (>0) (4.E62)
# NOTE: Byk should be > 0, but MFeval did not enforce it.
kappas = kappa + SHyk # (4.E61)
Byk_SHyk = Byk * SHyk
Gyk0 = lnp.cos(
Cyk * lnp.arctan(Byk_SHyk - Eyk * (Byk_SHyk - lnp.arctan(Byk_SHyk)))
) # (4.E60)
Byk_kappas = Byk * kappas
Gyk = (
lnp.cos(
Cyk
* lnp.arctan(Byk_kappas - Eyk * (Byk_kappas - lnp.arctan(Byk_kappas)))
)
/ Gyk0
) # (> 0)(4.E59)
# NOTE: Gyk should be > 0, but MFeval did not enforce it
Fy = Gyk * Fy0 + SVyk # (4.E58)
return Fy, Gyk, SVyk
def _calculate_Mz(
self,
kappa,
alpha,
gamma,
Fz,
vx,
Fx,
Kxk,
Fy,
lat_coeff,
starVar,
primeVar,
incrVar,
):
"""Calculate the self-aligning torque for combined slips.
        We have combined two functions from MFeval into one.
See MFEval Solver.calculateMz0 and Solver.calculateMz
"""
epsilonk = self._params["epsilonk"]
Kya = lat_coeff["Kya"]
SHy = lat_coeff["SHy"]
SVy = lat_coeff["SVy"]
SVyk = lat_coeff["SVyk"]
By = lat_coeff["By"]
Cy = lat_coeff["Cy"]
dfz = incrVar["dfz"]
LMUY_star = starVar["LMUY_star"]
Fz0_prime = primeVar["Fz0_prime"]
alpha_prime = primeVar["alpha_prime"]
# [DIMENSION]
R0 = self._params["UNLOADED_RADIUS"] # Free tire radius
# [VERTICAL]
Fz0 = self._params["FNOMIN"] # Nominal wheel load
# [SCALING_COEFFICIENTS]
LKY = self._params["LKY"] # Scale factor of Fy cornering stiffness
LTR = self._params["LTR"] # Scale factor of peak of pneumatic trail
LRES = self._params["LRES"] # Scale factor for offset of residual torque
LS = self._params["LS"] # Scale factor of lever arm of Fx
LFZO = self._params["LFZO"] # Scale factor of nominal (rated) load
# [ALIGNING_COEFFICIENTS]
QBZ1 = self._params["QBZ1"] # Trail slope factor for trail Bpt at Fznom
QBZ2 = self._params["QBZ2"] # Variation of slope Bpt with load
QBZ3 = self._params["QBZ3"] # Variation of slope Bpt with load squared
QBZ4 = self._params["QBZ4"] # Variation of slope Bpt with camber
QBZ5 = self._params["QBZ5"] # Variation of slope Bpt with absolute camber
QBZ9 = self._params["QBZ9"] # Factor for scaling factors of Br of Mzr
QBZ10 = self._params["QBZ10"] # Factor for corner. stiff. of Br of Mzr
QCZ1 = self._params["QCZ1"] # Shape factor Cpt for pneumatic trail
QDZ1 = self._params["QDZ1"] # Peak trail Dpt = Dpt*(Fz/Fznom.*R0)
QDZ2 = self._params["QDZ2"] # Variation of peak Dpt" with load
QDZ3 = self._params["QDZ3"] # Variation of peak Dpt" with camber
QDZ4 = self._params["QDZ4"] # Variation of peak Dpt" with camber squared
QDZ6 = self._params["QDZ6"] # Peak residual torque Dmr" = Dmr/(Fz*R0)
QDZ7 = self._params["QDZ7"] # Variation of peak factor Dmr" with load
QDZ8 = self._params["QDZ8"] # Variation of peak factor Dmr" with camber
QDZ9 = self._params[
"QDZ9"
] # Variation of peak factor Dmr" with camber and load
QEZ1 = self._params["QEZ1"] # Trail curvature Ept at Fznom
QEZ2 = self._params["QEZ2"] # Variation of curvature Ept with load
QEZ3 = self._params["QEZ3"] # Variation of curvature Ept with load squared
QEZ4 = self._params["QEZ4"] # Variation of curvature Ept with sign of Alpha-t
QEZ5 = self._params["QEZ5"] # Variation of Ept with camber and sign Alpha-t
QHZ1 = self._params["QHZ1"] # Trail horizontal shift Sht at Fznom
QHZ2 = self._params["QHZ2"] # Variation of shift Sht with load
QHZ3 = self._params["QHZ3"] # Variation of shift Sht with camber
QHZ4 = self._params["QHZ4"] # Variation of shift Sht with camber and load
SSZ1 = self._params["SSZ1"] # Nominal value of s/R0: effect of Fx on Mz
SSZ2 = self._params["SSZ2"] # Variation of distance s/R0 with Fy/Fznom
SSZ3 = self._params["SSZ3"] # Variation of distance s/R0 with camber
SSZ4 = self._params["SSZ4"] # Variation of distance s/R0 with load and camber
SHt = QHZ1 + QHZ2 * dfz + (QHZ3 + QHZ4 * dfz) * gamma # (4.E35)
# TODO: need to smoothen sign and avoid divide by zero when kya=0
sign_Kya = lnp.sign(Kya)
# sign_Kya = (Kya + eps_Kya) / lnp.sqrt((Kya + eps_Kya) ** 2 + eps_Kya)
Kya_prime = Kya + epsilonk * sign_Kya # (4.E39)
SHf = SHy + SVy / Kya_prime # (4.E38)
alphar = alpha + SHf # = alphaf (4.E37)
alphat = alpha + SHt # (4.E34)
# IMPORTANT NOTE: The above original equation (4.E43) was not matching the
# TNO solver. The coefficient Dt affects the pneumatic trail (t) and the
# self aligning torque (Mz).
        # It was observed that when negative inclination angles were used as
        # inputs, there was a discrepancy between the TNO solver and mfeval.
# This difference comes from the term QDZ3, that in the original equation
# is multiplied by abs(gamma_star). But in the paper the equation is
# different and the abs() term is not written. Equation (A60) from the
# paper resulted into a perfect match with TNO.
# Keep in mind that the equations from the paper don't include turn slip
# effects. The term zeta5 has been added although it doesn't appear in the
# paper.
# Paper definition:
Dt = (
(QDZ1 + QDZ2 * dfz)
* (1 + QDZ3 * gamma + QDZ4 * gamma ** 2)
* Fz
* (R0 / Fz0_prime)
* LTR
) # (A60)
# TODO: smoothen abs
abs_gamma = lnp.abs(gamma)
        # IMPORTANT NOTE: the original equation (4.E40) uses the parameter QBZ6,
        # which doesn't exist in the standard TIR files. Also note
# that on page 190 and 615 of the book a full set of parameters is given
# and QBZ6 doesn't appear.
# The equation has been replaced with equation (A58) from the paper.
# Paper definition
Bt = (
(QBZ1 + QBZ2 * dfz + QBZ3 * dfz ** 2)
* (1 + QBZ4 * gamma + QBZ5 * abs_gamma)
* LKY
/ LMUY_star
) # (> 0) (A58)
Ct = QCZ1 # (> 0) (4.E41)
Et = (QEZ1 + QEZ2 * dfz + QEZ3 * dfz ** 2) * (
1 + (QEZ4 + QEZ5 * gamma) * (2.0 / lnp.pi) * lnp.arctan(Bt * Ct * alphat)
) # (<=1) (4.E44)
# NOTE: zeta values from the non-turnslip branch
zeta0 = 1.0
zeta2 = 1.0
zeta6 = 1.0
zeta7 = 1.0
zeta8 = 1.0
# TODO: smoothen this sign
sign_Vcx = lnp.sign(vx)
        # NOTE: omit QDZ10, QDZ11, LKZC terms: MF6.1 only
Dr = (
(
Fz
* R0
* (
(QDZ6 + QDZ7 * dfz) * LRES * zeta2
+ (QDZ8 + QDZ9 * dfz) * gamma * zeta0
)
* LMUY_star
* sign_Vcx
* lnp.cos(alpha)
)
+ zeta8
- 1.0
) # (4.E47)
Br = (
QBZ9 * LKY / LMUY_star + QBZ10 * By * Cy
) * zeta6 # preferred QBZ9=0 (4.E45)
Cr = zeta7 # (4.E46)
# Equations from here onwards are from "calculateMz"
# IMPORTANT Note: The equations 4.E78 and 4.E77 are not used due to small
# differences discovered at negative camber angles with the TNO solver.
# Instead equations A54 and A55 from the paper are used.
# IMPORTANT Note: The coefficient "s" (Equation 4.E76) determines the
# effect of Fx into Mz. The book uses "Fz0_prime" in the formulation,
# but the paper uses "Fz0". The equation (A56) from the paper has a better
# correlation with TNO.
# TODO: smooth these sign functions
sign_alphar = lnp.sign(alphar)
sign_alphat = lnp.sign(alphat)
alphar_eq = (
lnp.arctan(
lnp.sqrt(lnp.tan(alphar) ** 2 + (Kxk / Kya_prime) ** 2 * kappa ** 2)
)
* sign_alphar
) # (A54)
alphat_eq = (
lnp.arctan(
lnp.sqrt(lnp.tan(alphat) ** 2 + (Kxk / Kya_prime) ** 2 * kappa ** 2)
)
* sign_alphat
) # (A55)
# Lever arm of Fx on Mz
s = R0 * (SSZ1 + SSZ2 * (Fy / Fz0) + (SSZ3 + SSZ4 * dfz) * gamma) * LS # (A56)
# Residual torque: aligning torque component from conicity and camber
Mzr = Dr * lnp.cos(Cr * lnp.arctan(Br * alphar_eq)) # (4.E75)
        # NOTE: here we omit the Fy_prime computation, which is not required for MF5.2.
        # In MF6.1 and 6.2, Fy and Fy0 need to be computed at zero camber; those
        # computations are omitted here.
# Pneumatic trail
t = (
Dt
* lnp.cos(
Ct
* lnp.arctan(
Bt * alphat_eq - Et * (Bt * alphat_eq - lnp.arctan(Bt * alphat_eq))
)
)
* lnp.cos(alpha_prime)
) # (4.E73)
# IMPORTANT NOTE: the equation below is not written in any source, but "t"
# is multiplied by LFZO in the TNO dteval function. This has been empirically
# discovered.
t = LFZO * t
# MF5.2 equations
Mz = -t * (Fy - SVyk) + Mzr + s * Fx # From the MF-Tire equation manual
return Mz
def _calculate_My(self, Fz, vx, Fx):
"""Calculate the rolling reistance moment of the tire.
In MF5.2, this is a function of vx and Fz only.
See MFeval Solver.calculateMy
"""
Vcx = vx
# [MODEL]
V0 = self._params["LONGVL"] # Nominal speed
# [VERTICAL]
Fz0 = self._params["FNOMIN"] # Nominal wheel load
# [DIMENSION]
R0 = self._params["UNLOADED_RADIUS"] # Free tire radius
# [SCALING_COEFFICIENTS]
LMY = self._params["LMY"] # Scale factor of rolling resistance torque
# [ROLLING_COEFFICIENTS]
QSY1 = self._params["QSY1"] # Rolling resistance torque coefficient
QSY2 = self._params["QSY2"] # Rolling resistance torque depending on Fx
QSY3 = self._params["QSY3"] # Rolling resistance torque depending on speed
QSY4 = self._params["QSY4"] # Rolling resistance torque depending on speed^4
# NOTE: only MF5.2 part from MFeval implemented here
# NOTE: abs(Vcx/V0) has its 'abs' removed as we only consider forward running
My = (
-R0
* Fz
* LMY
* (QSY1 + QSY2 * (Fx / Fz0) + QSY3 * (Vcx / V0) + QSY4 * (Vcx / V0) ** 4)
) # From the MF-Tire equation manual
return My
def _calculate_Mx(self, gamma, Fz, Fy):
"""Calculate the overturning moment of the tire.
See MFeval Solver.calculateMx
"""
# [VERTICAL]
Fz0 = self._params["FNOMIN"] # Nominal wheel load
# [DIMENSION]
R0 = self._params["UNLOADED_RADIUS"] # Free tire radius
# [SCALING_COEFFICIENTS]
LVMX = self._params["LVMX"] # Scale factor of Mx vertical shift
LMX = self._params["LMX"] # Scale factor of overturning couple
# [OVERTURNING_COEFFICIENTS]
QSX1 = self._params["QSX1"] # Vertical shift of overturning moment
QSX2 = self._params["QSX2"] # Camber induced overturning couple
QSX3 = self._params["QSX3"] # Fy induced overturning couple
# NOTE: only the TNO equations in MFeval, but without all effects after QSX3,
# which are MF6.1 only
Mx = R0 * Fz * LMX * (QSX1 * LVMX - QSX2 * gamma + QSX3 * (Fy / Fz0))
return Mx
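# A minimal usage sketch (an assumption, not part of the original module): build the model
# with its default .TIR parameters and evaluate a single operating point. How BaseTire
# consumes the params dict and the exact ModelReturn container are assumptions here.
if __name__ == "__main__":
    tire = MF52(params=MF52.get_default_params(), opt_mode=True)
    operating_point = {
        "Fz": 4000.0,   # vertical load [N]
        "kappa": 0.05,  # slip ratio [-]
        "alpha": 0.02,  # slip angle [rad]
        "vx": 20.0,     # longitudinal velocity in the tire frame [m/s]
        "gamma": 0.0,   # inclination angle [rad]
    }
    result = tire.forward(operating_point)
    print(result.outputs["Fx"], result.outputs["Fy"], result.outputs["GSum"])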
|
from time import sleep
print(" ")
print("░█████╗░░█████╗░██╗░░░░░░█████╗░██╗░░░██╗██████╗░██╗░░░██╗")
print("██╔══██╗██╔══██╗██║░░░░░██╔══██╗██║░░░██║██╔══██╗╚██╗░██╔╝")
print("██║░░╚═╝███████║██║░░░░░██║░░╚═╝██║░░░██║██████╔╝░╚████╔╝░")
print("██║░░██╗██╔══██║██║░░░░░██║░░██╗██║░░░██║██╔═══╝░░░╚██╔╝░░")
print("╚█████╔╝██║░░██║███████╗╚█████╔╝╚██████╔╝██║░░░░░░░░██║░░░")
print("░╚════╝░╚═╝░░╚═╝╚══════╝░╚════╝░░╚═════╝░╚═╝░░░░░░░░╚═╝░░░")
sleep(3)
print("\nBienvenid@ soy CalcuPy, estoy hecho en python...... Que deseas hacer?\n")
sleep(2)
print("Opciones Disponibles:\n")
sleep(2)
print("* Presione 'Y' para empezar CalcuPy\n")
sleep(2)
print("* Presione 'N' para cancelar inicio de CalcuPy \n ")
sleep(2)
print("* Presione 'C' par ver los creditos de CalcuPy\n")
sleep(2)
yes = str(input("¿Que deseas hacer?: "))
yes_lower = yes.lower()
sleep(1)
print("Comenzamos con las operaciones a continuacion ingresa los numeros de tu preferencia")
sleep(1)
def calculator():
sleep(1)
num_uno= int(input("\nIngrese el Primer Numero: "))
sleep(1)
num_dos= int(input("\ningrese el Segundo Numero: "))
sleep(2)
print("\n espere un momento CalcuPy esta cargando\n")
sleep(2)
print(" Elige una operacion matematica\n")
sleep(1)
print("1.- Suma\n")
sleep(1)
print("2.- Resta \n")
sleep(1)
print("3.- Multiplicacion\n")
sleep(1)
print("4.- Division \n")
sleep(1)
print("5.- Modulo o resto\n")
sleep(1)
global operacion
operacion = int(input("Eliga una de las anteriores opciones: "))
try:
if operacion == 1:
sleep(3)
print("\nHas elegido suma\n")
sleep(1)
complete=f"La operacion {num_uno} + {num_dos} da como resultado: "
print(complete)
print(num_uno + num_dos)
elif operacion == 2:
sleep(3)
print("\nHas elegido Resta\n")
complete=f"La operacion {num_uno} + {num_dos} da como resultado: "
sleep(1)
print(complete)
print(num_uno - num_dos)
elif operacion == 3:
sleep(3)
print("\nHas elegido Multiplicacion\n")
complete=f"La operacion {num_uno} {num_dos} da como resultado: "
sleep(1)
print(complete)
print(num_uno * num_dos)
elif operacion == 4:
sleep(1)
print("\nHas elegido Division\n")
complete=f"La operacion {num_uno} / {num_dos} da como resultado: "
sleep(3)
print(complete)
print(num_uno / num_dos)
elif operacion == 5:
sleep(1)
print("\nHas elegido Modulo o exponte\n")
complete=f"La operacion {num_uno} % {num_dos} da como resultado: "
sleep(3)
print(complete)
            print(num_uno % num_dos)
else:
sleep(1)
print("No hay opciones disponibles......\n")
print("Bye see you later")
sleep(2)
print("Chao Gracias, ahora me siento usado XD. Si te gusto Felicita a Alexander")
    except ValueError:
        print("El valor debe ser int(numero)")
        print("Saliendo")
if yes_lower == "y" :
sleep(5)
calculator()
elif yes_lower =="c" :
sleep(1)
print("\nCreado por AlexanderG, el 28/04/21 para el reto de sendero tecnologico\n")
sleep(1)
print("Hecho con amor, en python\n")
else:
print("Bye")
|
import os
import re
import pandas as pd
from pyBigstick.orbits import df_orbits
import numpy as np
nuclear_data = pd.read_json('pyBigstick/nuclear_data.json')
# script comments
option_comment = '! create densities\n'
name_comment = '! output file name\n'
sps_comment = '! orbit information\n'
valence_comment = '! # of valence protons, neutrons\n'
jz2_comment = '! 2 * jz\n'
parity_comment = '! both parities wanted\n'
fragsize_comment = '! limit on fragment size for breaking Lanczos vectors\n'
hamil_comment = '! interaction file name (pkuo.int)\n'
scaling_comment = '! scaling\n'
end_comment = '! end of reading in Hamiltonian files\n'
diag_comment = '! diagonalization algorithm\n'
states_comment = '! number of state, max Lanczos iteration'
class Nucleus:
def __init__(self, nucl_symbol: str, n_states=6, diag='ld', maxiter=400, fragsize=-1):
        symbol, mass_number = re.split(r'(\d+)', nucl_symbol)[:2]
        try:
            nucl = nuclear_data[nuclear_data['symbol'] == symbol.capitalize()]
            proton = int(nucl['atomicNumber'])
        except (TypeError, ValueError):
            raise ValueError('Nucleus does not exist. Check the symbol.')
        neutron = int(mass_number) - proton
        if neutron < 0:
            raise ValueError('Nucleus does not exist. Neutron number cannot be negative')
self.symbol = symbol
self.nucl_symbol = nucl_symbol
self.nucl = nucl
self.A = int(mass_number)
self.p = proton
self.n = neutron
# proton and neutron must be in the same orbit
self.p_orbit = self.__get_orbit(proton)
self.n_orbit = self.__get_orbit(neutron)
if self.p_orbit != self.n_orbit:
raise ValueError(f"Protons ({self.p_orbit}) and neutrons ({self.n_orbit}) are in different orbits. Handle them manually.")
else:
self.orbit = self.p_orbit
self.jz = 0 if self.A %2 == 0 else 1
self.p_valence = self.__get_valence()[0]
self.n_valence = self.__get_valence()[1]
self.int = self.__get_interaction()
        self.scaling = f'{1} {2*self.__get_core_orbit_capacity()+2} {self.A} {0.3}'
self.n_states = n_states # number of states,
self.diag = diag # diagonalize algo
self.maxiter = maxiter # max iteration
self.fragsize = fragsize # for parallel job only, -1 means no parallel
# results
self.states = [] # energy level states: staten, E, Ex, J, T
self.densities = [] # density matrices: statei, statej, orba, orbb, Jt, Tt, value
# n is nucleon
def __get_valence(self, hole=True):
        core_size = self.__get_core_orbit_capacity()
size = df_orbits.loc[df_orbits.name == self.orbit, 'size'].values[0]
p_valence = self.p - core_size
n_valence = self.n - core_size
# negatives mean holes
if hole == True:
if p_valence > size/2 and p_valence != size:
p_valence = - (size - p_valence)
if n_valence > size/2 and n_valence != size:
n_valence = - (size - n_valence)
return p_valence, n_valence
def __get_orbit(self, n):
if n <= 2:
return 's'
if 2 < n <= 8:
return '0p'
if 8 < n <= 20:
return 'sd'
if 20 < n <= 40:
return 'pf'
if 50 < n <= 82:
return 'jj55'
        else:
            raise ValueError(f'No default orbit for {n} nucleons')
    def __get_core_orbit_capacity(self):
return df_orbits.loc[df_orbits.name == self.orbit, 'core_size'].values[0]
def __get_interaction(self):
if self.orbit == 's':
raise ValueError('Interaction not found for s orbits')
try:
return df_orbits.loc[df_orbits.name == self.orbit, 'int'].values[0]
except:
raise ValueError('Interaction not found')
# generate the script
def script_gen(self):
option, end, parity = 'd', 'end', '0'
output = f'{option:20} {option_comment}'
output += f'{self.nucl_symbol:20} {name_comment}'
if self.orbit == 'jj55':
jj55_orbit = 'jj55pn'
output += f'{jj55_orbit:20} {sps_comment}'
else:
output += f'{self.orbit:20} {sps_comment}'
output += f'{str(self.p_valence)} {str(self.n_valence):18} {valence_comment}'
output += f'{str(self.jz):20} {jz2_comment}'
output += f'{parity:20} {parity_comment}'
if self.fragsize != -1:
output += f'{str(self.fragsize):20} {fragsize_comment}'
if self.int == 'jj55pna':
output += 'upn\n'
output += f'{self.int:20} {hamil_comment}'
output += f'{self.scaling:20} {scaling_comment}'
output += f'{end:20} {end_comment}'
output += f'{self.diag:20} {diag_comment}'
output += f'{str(self.n_states)} {str(self.maxiter):18} {states_comment}'
return output
# save the script
def script_save(self):
filepath = f'examples/{self.nucl_symbol}/create_wfn.in'
os.makedirs(os.path.dirname(filepath), exist_ok=True)
with open(filepath, "w") as f:
f.write(self.script_gen())
# prepare necessary input files (.sps, .int, etc)
def prepare(self):
commands = f'cp sps/{self.orbit}.sps examples/{self.nucl_symbol}/;'
commands += f'cp ints/{self.int}.int examples/{self.nucl_symbol}/;'
os.system(commands)
def run(self, bs=None, quiet=True):
commands = ''
if bs == None:
raise ValueError('Must specify BIGSTICK path by bs=/bs/path')
commands += f'cd examples/{self.nucl_symbol};'
commands += f'{bs} < create_wfn.in &&'
if quiet == True:
commands += f'rm *.bigstick fort.* {self.nucl_symbol}.lcoef;\n'
os.system(commands)
def __get_levels(self):
filepath = f'examples/{self.nucl_symbol}/{self.nucl_symbol}.res'
with open(filepath, "r") as f:
lines = f.readlines()
split_lines = [line.split() for line in lines]
for line_n ,line in enumerate(split_lines):
if 'State' in line and 'Ex' in line:
headings_n = line_n
break
# the states
for i in range(headings_n+1, headings_n+1+self.n_states):
state = np.array(split_lines[i])
state_n, energy, energy_x, J0, T0 = state
self.states.append([int(state_n), float(energy), float(energy_x), float(J0), float(T0)])
return self.states
# return ij transition in bigstick format
def __get_transition_ij(self, statej=2, statei=1):
filepath = f'examples/{self.nucl_symbol}/{self.nucl_symbol}.dres'
matrix = ''
starting_line, ending_line = 0, 0
with open(filepath) as f:
unsplit_lines = f.readlines()
lines = []
for line in unsplit_lines:
line = line.split()
if line and '++++' in line[0]:
line = []
lines.append(line)
for i in range(len(lines)):
if 'Initial' in lines[i] and 'state' in lines[i]:
if statei == int(lines[i][3]) and statej == int(lines[i+1][3]):
starting_line = i
while True:
line_pivot = starting_line + ending_line
if lines[line_pivot] == []:
break
ending_line += 1
ending_line = starting_line + ending_line
for l in range(starting_line, ending_line):
matrix += unsplit_lines[l]
return matrix
def __get_matrices_ij(self, statej=2, statei=1):
matrix = self.__get_transition_ij(statej, statei)
lines = matrix.split('\n')
lines = [line.split() for line in lines]
for line in lines:
if 'Jt' in line:
Jt = int(line[2][:-1])
if len(line) == 4:
orba, orbb, me0, me1 = line
self.densities.append([statei, statej, int(orba), int(orbb), Jt, 0, float(me0)])
self.densities.append([statei, statej, int(orba), int(orbb), Jt, 1, float(me1)])
return self.densities
def save_results(self):
        # initialize
self.states = []
self.densities = []
self.__get_levels()
for i in range(self.n_states):
for j in range(self.n_states):
self.__get_matrices_ij(j, i)
self.states = pd.DataFrame(self.states, columns=['state', 'E', 'Ex', 'J', 'T'])
self.states.index += 1
self.densities = pd.DataFrame(self.densities, columns=['statei', 'statej', 'orba', 'orbb', 'Jt', 'Tt', 'value'])
self.densities['amp'] = self.densities['value'] **2
# check if calculated
def check(self):
filepath = f'examples/{self.nucl_symbol}/{self.nucl_symbol}.wfn'
return os.path.exists(filepath)
def clean(self):
commands = f'rm -rf examples/{self.nucl_symbol}'
os.system(commands)
if __name__ == "__main__":
nu = Nucleus('fe57')
print('jz:', nu.jz)
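    # A hedged end-to-end sketch of the typical workflow (commented out because it needs a
    # local BIGSTICK executable; the path below is a placeholder, not part of pyBigstick):
    # nu.script_save()                  # write examples/fe57/create_wfn.in
    # nu.prepare()                      # copy the .sps and .int inputs next to it
    # nu.run(bs='/path/to/bigstick.x')  # run BIGSTICK on the generated script
    # if nu.check():
    #     nu.save_results()             # parse the .res/.dres outputs into DataFrames
    #     print(nu.states)
    #     print(nu.densities.head())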
|
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 22 10:20:05 2021
@author: qizhe
"""
from collections import defaultdict
from typing import List
class Solution:
def majorityElement(self, nums: List[int]) -> List[int]:
"""
读题
1、这题本身不难,建个字典统计一下就是了
2、进阶里面让空间复杂O1
3、其实,最多只需要返回2个数,最终需要存2个,但过程中需要想办法
4、没想出特别好的办法,是不是可以重复利用空间
5、排序一下好像挺好,但排序好像就超了
答案:
1、第一种方案是我的
2、第二进阶版,没接触过想不到,是所谓的“摩尔投票法”
3、本质上是,对拼消耗
"""
ans = []
element1, element2 = 0, 0
vote1, vote2 = 0, 0
for num in nums:
            # If num matches the first candidate, increment its vote
if vote1 > 0 and num == element1:
vote1 += 1
            # If num matches the second candidate, increment its vote
elif vote2 > 0 and num == element2:
vote2 += 1
            # Otherwise, if the first slot is free, pick num as the first candidate
elif vote1 == 0:
element1 = num
vote1 += 1
            # Otherwise, if the second slot is free, pick num as the second candidate
elif vote2 == 0:
element2 = num
vote2 += 1
            # num differs from both candidates: all three cancel one vote each
else:
vote1 -= 1
vote2 -= 1
        # Count how many times the surviving candidates element1 and element2 occur in nums
cnt1, cnt2 = 0, 0
for num in nums:
if vote1 > 0 and num == element1:
cnt1 += 1
if vote2 > 0 and num == element2:
cnt2 += 1
        # Keep a candidate only if its count exceeds len(nums) // 3
if vote1 > 0 and cnt1 > len(nums) // 3:
ans.append(element1)
if vote2 > 0 and cnt2 > len(nums) // 3:
ans.append(element2)
return ans
if __name__ == '__main__':
solu = Solution()
# nums = [3,2,1,5]
n = 8
inputList = [1]
inputList = [2, 0, 5, 6, 2, 3]
matrix = [["0","1","0","1","0"],["1","1","0","1","1"],["1","1","1","1","1"],["1","0","0","1","0"]]
matrix = []
matrix = [["0"],['1']]
matrix = [["1","0"]]
# inputList = [2, 2]
# time = 3
# beginWord = "ymain"
# endWord = "oecij"
# wordList = ["ymann","yycrj","oecij","ymcnj","yzcrj","yycij","xecij","yecij","ymanj","yzcnj","ymain"]
s1 = "great"
s2 = "rgeat"
s1 = "abcde"
s2 = "caebd"
nums = [1]
result = solu.majorityElement(nums)
output_Str = ' result = ' + str(result)
print(output_Str)
|
import os
import sys
from setuptools import find_packages, setup
IS_RTD = os.environ.get("READTHEDOCS", None)
version = "0.4.0b14.dev0"
long_description = open(os.path.join(os.path.dirname(__file__), "README.rst")).read()
install_requires = [
    "morepath==0.19",
    "alembic",
    "rulez>=0.1.4,<0.2.0",
    "inverter>=0.1.0,<0.2.0",
    "more.cors",
    "celery",
    "redis",
    "jsl",
    "pyyaml>=4.2b1",
    "more.jsonschema",
    "sqlalchemy",
    "sqlalchemy_utils",
    "more.signals",
    "DateTime",
    "transitions",
    "jsonpath_ng",
    "python-dateutil",
    "more.jwtauth",
    "more.itsdangerous",
    "sqlsoup",
    "gunicorn",
    "itsdangerous",
    "passlib",
    "jsonschema",
    "more.transaction",
    "zope.sqlalchemy",
    "sqlalchemy_jsonfield",
    "more.basicauth",
    "cryptography",
    "elasticsearch>7.0.0,<8.0.0",
    "pamela",
    "click",
    "cookiecutter",
    "eventlet",
    "wsgigzip",
    "psycopg2",
    "colander",
    "deform",
    "more.chameleon",
    "more.static",
    "RestrictedPython",
    "beaker",
    "zstandard",
    "oauthlib[signedtoken]",
    "requests-oauthlib",
]
if IS_RTD is None:
install_requires.append("python-ldap")
setup(
name="morpfw",
version=version,
description="Web framework based on morepath",
long_description=long_description,
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords="",
author="Izhar Firdaus",
author_email="izhar@kagesenshi.org",
url="http://github.com/morpframework/morpfw",
license="Apache-2.0",
packages=find_packages(exclude=["ez_setup", "examples", "tests"]),
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
extras_require={
"test": [
"nose",
"webtest",
"pytest",
"pytest-html",
"pytest_postgresql",
"pytest_rabbitmq",
"pytest-annotate",
"pytest-cov",
"pika",
"mirakuru",
],
"docs": ["sphinxcontrib-httpdomain", "sphinx-click"],
},
entry_points={
"morepath": ["scan=morpfw"],
"console_scripts": [
"morpfw=morpfw.cli.main:main",
"mfw-runmodule=morpfw.cli:run_module",
"mfw-profilemodule=morpfw.cli:run_module_profile",
],
},
)
|
l = [1,2,1,1,3,2]
print(l)
l = list(set(l))
print(l)
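# Note: set() does not preserve the original order of the elements. An order-preserving
# alternative (a sketch, if order matters) is:
# l = list(dict.fromkeys([1, 2, 1, 1, 3, 2]))   # -> [1, 2, 3]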
|
# 017 - given the lengths of the two legs (catetos) of a right triangle,
# compute and display the length of the hypotenuse
import math
cateto1 = float(input('Cateto 1:'))
cateto2 = float(input('Cateto 2:'))
print('O valor da hipotenusa é: {:.2f}' .format(math.hypot(cateto1, cateto2)))
|
#!/usr/bin/env python3
import argparse, os
import qrcode.console_scripts as qr
# Change to script directory:
current_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(current_path)
filename = f'{current_path}/authenticator.txt'
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", help="path to Google Authenticator exported text file")
args = parser.parse_args()
if args.file:
filename = args.file
# get lines from file into list:
with open(filename, 'r', encoding='UTF-8') as f:
lines = f.readlines()
lines = [line.rstrip() for line in lines]
# generate qr codes from each line in lines list:
number_of_codes = len(lines)
for i, line in enumerate(lines):
i += 1
print('-------------------------------------------------------------------')
print(f'QR CODE [{i} of {number_of_codes}]:')
print(line)
qr.main(args=[line])
if i != number_of_codes:
input('Press Enter to continue...')
|
# modify from clovaai
import logging
import os
import re
import cv2
from torch.utils.data import Dataset
class BaseDataset(Dataset):
def __init__(self, root, gt_txt=None, transform=None, character='abcdefghijklmnopqrstuvwxyz0123456789',
batch_max_length=100000, data_filter=True):
assert type(root) == str
if gt_txt is not None:
assert os.path.isfile(gt_txt)
self.gt_txt = gt_txt
self.root = os.path.abspath(root)
self.character = character
self.batch_max_length = batch_max_length
self.data_filter = data_filter
        self.transforms = transform
self.samples = 0
self.img_names = []
self.gt_texts = []
self.get_name_list()
self.logger = logging.getLogger()
self.logger.info(f'current dataset length is {self.samples} in {self.root}')
def get_name_list(self):
raise NotImplementedError
    def filter(self, label):
        """We will filter those samples whose length is larger than defined max_length by default."""
        if not self.data_filter:
            return False
character = "".join(sorted(self.character, key=lambda x: ord(x)))
out_of_char = f'[^{character}]'
label = re.sub(out_of_char, '', label.lower()) # replace those character not in self.character with ''
if len(label) > self.batch_max_length: # filter whose label larger than batch_max_length
return True
return False
def __getitem__(self, index):
# default img channel is rgb
img = cv2.imread(self.img_names[index])
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
label = self.gt_texts[index]
if self.transforms:
aug = self.transforms(image=img, label=label)
img, label = aug['image'], aug['label']
return img, label
def __len__(self):
return self.samples
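# A minimal subclass sketch (the annotation layout is an assumption, not part of the
# original module): one "relative/image/path<TAB>label" pair per line in gt_txt.
class TxtAnnotationDataset(BaseDataset):
    def get_name_list(self):
        with open(self.gt_txt, encoding='utf-8') as f:
            for line in f:
                name, label = line.rstrip('\n').split('\t', 1)
                if self.filter(label):
                    continue  # drop samples with overlong labels
                self.img_names.append(os.path.join(self.root, name))
                self.gt_texts.append(label)
        self.samples = len(self.img_names)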
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from contextlib import ExitStack
from torch.distributions import biject_to
from torch.distributions.transforms import ComposeTransform
import pyro
import pyro.distributions as dist
from pyro.poutine.plate_messenger import block_plate
from .reparam import Reparam
# TODO Replace with .with_cache() once the following is released:
# https://github.com/probtorch/pytorch/pull/153
def _with_cache(t):
return t.with_cache() if hasattr(t, "with_cache") else t
class UnitJacobianReparam(Reparam):
"""
Reparameterizer for :class:`~torch.distributions.transforms.Transform`
objects whose Jacobian determinant is one.
:param transform: A transform whose Jacobian has determinant 1.
:type transform: ~torch.distributions.transforms.Transform
:param str suffix: A suffix to append to the transformed site.
"""
def __init__(self, transform, suffix="transformed", *,
experimental_allow_batch=False):
self.transform = _with_cache(transform)
self.suffix = suffix
self.experimental_allow_batch = experimental_allow_batch
def __call__(self, name, fn, obs):
        assert obs is None, "UnitJacobianReparam does not support observe statements"
event_dim = fn.event_dim
transform = self.transform
with ExitStack() as stack:
shift = max(0, transform.event_dim - event_dim)
if shift:
assert self.experimental_allow_batch, (
"Cannot transform along batch dimension; "
"try converting a batch dimension to an event dimension")
# Reshape and mute plates using block_plate.
from pyro.contrib.forecast.util import reshape_batch, reshape_transform_batch
old_shape = fn.batch_shape
new_shape = old_shape[:-shift] + (1,) * shift + old_shape[-shift:]
fn = reshape_batch(fn, new_shape).to_event(shift)
transform = reshape_transform_batch(transform,
old_shape + fn.event_shape,
new_shape + fn.event_shape)
for dim in range(-shift, 0):
stack.enter_context(block_plate(dim=dim))
# Draw noise from the base distribution.
transform = ComposeTransform([_with_cache(biject_to(fn.support).inv),
self.transform])
x_trans = pyro.sample("{}_{}".format(name, self.suffix),
dist.TransformedDistribution(fn, transform))
# Differentiably transform.
x = transform.inv(x_trans) # should be free due to transform cache
if shift:
x = x.reshape(x.shape[:-2 * shift - event_dim] + x.shape[-shift - event_dim:])
# Simulate a pyro.deterministic() site.
new_fn = dist.Delta(x, event_dim=event_dim)
return new_fn, x
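# A minimal usage sketch (an assumption, not part of this module; the relative imports above
# mean this file is not meant to be run directly, so the example is left commented out).
# HaarTransform is used as one example of a unit-Jacobian transform, and the toy model and
# site names are illustrative:
# import torch
# from pyro.distributions.transforms import HaarTransform
# from pyro.infer.reparam import UnitJacobianReparam
# def model():
#     return pyro.sample("x", dist.Normal(torch.zeros(8), 1.0).to_event(1))
# reparam_model = pyro.poutine.reparam(
#     model, config={"x": UnitJacobianReparam(HaarTransform(dim=-1))}
# )
# with pyro.poutine.trace() as tracer:
#     reparam_model()
# sorted(tracer.trace.nodes.keys())  # expect "x" and "x_transformed"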
|
#!/usr/bin/env python3
import logging
import re
import subprocess
# from collections import namedtuple
import xml.etree.ElementTree as ET
import random
import string
import os
import requests
logger = logging.getLogger(__name__)
def bx_login(endpoint, user, pwd, account, resource_group = 'Default'):
    '''Bluemix login. If the resource group is not defined it defaults to 'Default'.
    '''
# login
completed = __run("bx login -a {} -u {} -p {} -c {} -g {}".format(
endpoint, user, pwd, account, resource_group))
return completed.returncode ## we might not need this at all, it will raise an exception if failed
def get_bx_iam_token() :
    '''Get the Bluemix IAM token. Can only be obtained after a call to bx_login().
    '''
completed = __run('bx iam oauth-tokens')
return completed.stdout.split('\n', 1)[0].split(':')[1].strip()
def get_bx_resource_service_instance(instance_name):
'''Get bluemix resource service instance details.
'''
shout = __run("bx resource service-instance {}".format(instance_name))
return __process_bx_output_to_dict(shout.stdout)
def deploy_docker_as_cf_app(name, docker_image, domain, hostname=None, instances="1", disk_limit='2G', mem_limit='1G'):
'''Deploy docker to CF as app.
Will do it with no-route!!
'''
tokenDict = __get_cr_token()
token = tokenDict['Token']
logger.debug("Token is: {}".format(token))
my_env = os.environ.copy()
my_env["CF_DOCKER_PASSWORD"] = token
logger.debug("Prepared env to run is: {}".format(my_env))
host = hostname
if hostname == None:
host = name
cmd = "cf push -i {} -m {} -k {} -d {} -n {} {} --docker-image {} --docker-username token".format(instances, mem_limit, disk_limit, domain, host, name, docker_image)
__run(cmd, env=my_env)
# cmdAry = ["cf", "push", "-i", instances, "-m", mem_limit, "-k", disk_limit, "-d", domain, "-n", host, name, "--docker-image", docker_image, "--docker-username", "token"]
# logging.debug("About to call: {}".format(cmdAry))
# completed = subprocess.run(cmdAry, check=True, stdout=subprocess.PIPE, encoding='utf-8', env=my_env)
# logger.debug('Call returned with: {}'.format(completed))
def __get_cr_token():
# bx cr token-add
shout = __run("bx cr token-add --non-expiring")
##tokenDict = __process_bx_output_to_dict(shout.stdout)
lines = shout.stdout.split('\n')
tokenDict = {}
for line_no, line in enumerate(lines) :
# print("l{} : {}".format(line_no, line))
if line_no == 2:
# print(re.split(r"\s{2,3}", line))
tokenDict['Identifier'] = re.split(r"\s{2,3}", line)[1].strip()
if line_no == 3:
tokenDict['Token'] = re.split(r"\s+", line)[1].strip()
return tokenDict
def bx_create_bucket(desired_name, cos_url, cos_rid, bucket_location):
    '''Creates a bucket and returns its real name.
    '''
iam_token = get_bx_iam_token()
found_bucket = __find_bucket(desired_name, cos_url, cos_rid, iam_token)
if found_bucket != None:
## found
return found_bucket
else:
## not found so try to create it
logging.info("Bucket {} not found.".format(desired_name))
if not __create_bucket(desired_name, cos_url, cos_rid, iam_token, bucket_location):
## try 5 times with rnd
for i in list(range(1, 5)):
rnd=random.sample(list(string.ascii_lowercase+string.digits), 4)
forged_bucket_name = desired_name + '-' + ''.join(rnd)
logging.info("Trying to create bucket with forged name: {}".format(forged_bucket_name))
if __create_bucket(forged_bucket_name, cos_url, cos_rid, iam_token, bucket_location):
logging.info("Successfully created a bucket in {}".format(i))
return forged_bucket_name
raise Exception("Fail to create bucket after 6 tries. Bailing out.")
return desired_name
def __create_bucket(bucket_name, cos_url, cos_rid, iam_token, bucket_location):
headers= {'Content-Type': 'text/plain; charset=utf-8', \
'Authorization': iam_token, \
'ibm-service-instance-id' : cos_rid }
logging.debug("Bucket Create Headers: {}".format(headers))
data="<CreateBucketConfiguration><LocationConstraint>{}</LocationConstraint></CreateBucketConfiguration>".format(bucket_location)
logging.debug("Data: {}".format(data))
url='{}/{}'.format(cos_url, bucket_name)
logging.debug("Bucket create url {}".format(url))
resp = requests.put(url, headers=headers, data=data)
logging.debug(resp)
    if resp.status_code == 200:
        logging.debug('Bucket {} created successfully'.format(bucket_name))
        return True
elif resp.status_code == 409:
tree = ET.ElementTree(ET.fromstring(resp.content))
root = tree.getroot()
error_code = root.find('.//Code')
print(error_code.text)
if error_code.text == 'BucketAlreadyExists' :
logging.warn('Bucket {} already exists somewhere in the wild. Got message: {}'.format(bucket_name, resp.content))
else:
            logging.warn('Unknown problem occurred: {}'.format(resp.content))
return False
else:
        logging.warn('Unknown problem occurred: {}'.format(resp.content))
return False
def __find_bucket(desired_name, cos_url, cos_rid, iam_token):
headers= {'Accept': 'application/json', \
'Content-Type': 'application/json', \
'Authorization': iam_token, \
'ibm-service-instance-id' : cos_rid }
logging.debug(headers)
## check if exists
resp = requests.get(cos_url, headers=headers)
if resp.status_code == 200:
content = resp.content.decode('utf-8')
logging.debug(content)
tree = ET.ElementTree(ET.fromstring(content))
root = tree.getroot()
ns = {'s3' : 'http://s3.amazonaws.com/doc/2006-03-01/'}
for bucket in root.findall('.//s3:Bucket', ns):
name = bucket.find('s3:Name', ns)
logging.debug("Found bucket with name: {}".format(name.text))
p = re.compile( '^'+desired_name + r'(-\w{4})?$' )
if p.match(name.text):
## found
logging.info("Bucket found with name {}".format(name.text))
return name.text
return None
    else:
        raise Exception('Failed to list buckets: HTTP {} {}'.format(resp.status_code, resp.content))
def __run(cmd, check=True, env=None):
logging.debug("About to call: {}".format(cmd))
completed = subprocess.run([cmd], shell=True, check=check, stdout=subprocess.PIPE, encoding='utf-8', env=env)
logger.debug('Call returned with: {}'.format(completed))
return completed
def __process_bx_output_to_dict(output):
'''Process IBM Cloud classical output of key-value pair to dict.
'''
p = re.compile(r'((\w+\s?)+)\:\s+(.+)')
ret = {}
lines = output.split('\n')
for line in lines :
m = p.match(line)
if m :
g = m.groups()
# print("K: -{}- V: -{}-".format(g[0], g[2].strip()))
ret[g[0]]=g[2].strip()
return ret
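# A minimal usage sketch (placeholders only; the endpoint, credentials and resource ids
# below are assumptions, not real values):
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    bx_login("https://api.ng.bluemix.net", "user@example.com", "secret", "account-id")
    bucket = bx_create_bucket(
        desired_name="my-backups",
        cos_url="https://s3-api.us-geo.objectstorage.softlayer.net",
        cos_rid="crn:v1:bluemix:public:cloud-object-storage:...",
        bucket_location="us-standard",
    )
    print("Using bucket:", bucket)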
|
import logging
import time
from django.conf import settings
from .collector import statsd
PROCESSING_TIME_METRIC_PREFIX = getattr(
settings, 'PROCESSING_TIME_METRIC_PREFIX', 'processing_time'
)
REQUESTS_COUNTER_METRIC_PREFIX = getattr(
settings, 'REQUESTS_COUNTER_METRIC_PREFIX', 'requests_count'
)
METRIC_NAME_TMPL = getattr(
settings,
'REQUESTS_METRIC_NAME_TMPL',
'{prefix}.{url_name}.{request_method}.{status_code}'
)
ALL_URLS_METRIC_NAME_TMPL = getattr(
settings,
'REQUESTS_ALL_URLS_METRIC_NAME_TMPL',
'{prefix}_all_urls.{request_method}'
)
UNKNOWN_URL_NAME = 'unknown'
logger = logging.getLogger(__name__)
class RequestMetricsMiddleware(object):
"""
Middleware reporting request metrics (such as processing time) to statsd
How to use it:
* add this middleware at the beginning of MIDDLEWARE_CLASSES in your
settings
* configure statsd in your settings:
http://statsd.readthedocs.io/en/v3.2.1/configure.html#in-django
"""
def process_request(self, request):
request._request_start_time = time.monotonic()
def _collect_metrics(self, request, response):
try:
url_name = request.resolver_match.url_name
except AttributeError:
logger.warning(
'URL resolver not found', extra={'path': request.path}
)
url_name = UNKNOWN_URL_NAME
common_log_params = {
'request_method': request.method,
'url_name': url_name,
'status_code': response.status_code,
}
# processing time
processing_time_metric_name = METRIC_NAME_TMPL.format(
prefix=PROCESSING_TIME_METRIC_PREFIX, **common_log_params
)
processing_time_all_urls_metric_name = ALL_URLS_METRIC_NAME_TMPL.format(
prefix=PROCESSING_TIME_METRIC_PREFIX, **common_log_params
)
processing_time = int(
(time.monotonic() - request._request_start_time) * 1000
)
statsd.timing(processing_time_metric_name, processing_time)
statsd.timing(processing_time_all_urls_metric_name, processing_time)
# requests counter
requests_counter_metric_name = METRIC_NAME_TMPL.format(
prefix=REQUESTS_COUNTER_METRIC_PREFIX, **common_log_params
)
statsd.incr(requests_counter_metric_name)
def process_response(self, request, response):
try:
self._collect_metrics(request, response)
except Exception:
logger.exception('Exception during collecting metrics')
return response
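# A minimal configuration sketch (assumed project settings; names and values are
# illustrative, not part of this module):
#
# MIDDLEWARE_CLASSES = (
#     'yourapp.middleware.RequestMetricsMiddleware',  # first, so timing spans the whole stack
#     # ... the rest of your middleware ...
# )
# STATSD_HOST = 'localhost'
# STATSD_PORT = 8125
# STATSD_PREFIX = 'yourproject'
# # optional overrides picked up by this module:
# PROCESSING_TIME_METRIC_PREFIX = 'processing_time'
# REQUESTS_COUNTER_METRIC_PREFIX = 'requests_count'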
|
"""
Copyright 2020 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime as dt
import pandas as pd
import pytest
from pandas._libs.tslibs.offsets import CustomBusinessDay
from pandas.testing import assert_series_equal
from testfixtures import Replacer
from testfixtures.mock import Mock
import gs_quant.timeseries.measures_inflation as tm
import gs_quant.timeseries.measures as tm_rates
from gs_quant.api.gs.assets import GsAsset
from gs_quant.api.gs.data import GsDataApi, MarketDataResponseFrame
from gs_quant.data import DataContext
from gs_quant.errors import MqError, MqValueError
from gs_quant.session import GsSession, Environment
from gs_quant.target.common import PricingLocation, Currency as CurrEnum
from gs_quant.test.timeseries.utils import mock_request
from gs_quant.timeseries import Currency, CurrencyEnum, SecurityMaster
from gs_quant.timeseries.measures_inflation import _currency_to_tdapi_inflation_swap_rate_asset, \
INFLATION_RATES_DEFAULTS, TdapiInflationRatesDefaultsProvider
from gs_quant.timeseries.measures_rates import _ClearingHouse
_index = [pd.Timestamp('2021-03-30')]
_test_datasets = ('TEST_DATASET',)
def test_get_floating_rate_option_for_benchmark_returns_rate():
provider = TdapiInflationRatesDefaultsProvider(INFLATION_RATES_DEFAULTS)
value = provider.get_index_for_benchmark(CurrencyEnum.GBP, "UKRPI")
assert value == "CPI-UKRPI"
def test_get_floating_rate_option_for_benchmark_returns_rate_usd():
provider = TdapiInflationRatesDefaultsProvider(INFLATION_RATES_DEFAULTS)
value = provider.get_index_for_benchmark(CurrencyEnum.USD, "CPURNSA")
assert value == "CPI-CPURNSA"
def test_currency_to_tdapi_inflation_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures_inflation.Asset.get_identifier', Mock())
with tm_rates.PricingContext(dt.date.today()):
cur = [
{
"currency_assetId": "MAK1FHKH5P5GJSHH",
"currency": "JPY",
"inflation_id": "MA1CENMCA88VXJ28"},
{
"currency_assetId": "MA66CZBQJST05XKG",
"currency": "GBP",
"inflation_id": "MAW75DV9777630QN"},
{
"currency_assetId": "MAJNQPFGN1EBDHAE",
"currency": "EUR",
"inflation_id": "MAJTD8XDA8EJZYRG"},
{
"currency_assetId": "MAZ7RWC904JYHYPS",
"currency": "USD",
"inflation_id": "MA4016GCT3MDRYVY"}
]
for c in cur:
print(c)
asset = Currency(c.get("currency_assetId"), c.get("currency"))
bbid_mock.return_value = c.get("currency")
mqid = _currency_to_tdapi_inflation_swap_rate_asset(asset)
assert mqid == c.get("inflation_id")
bbid_mock.return_value = None
assert _currency_to_tdapi_inflation_swap_rate_asset(asset) == c.get("currency_assetId")
replace.restore()
def test_get_inflation_swap_leg_defaults():
result_dict = dict(currency=CurrEnum.JPY, index_type='CPI-JCPNGENF',
pricing_location=PricingLocation.TKO)
defaults = tm._get_inflation_swap_leg_defaults(CurrEnum.JPY)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.USD, index_type='CPI-CPURNSA',
pricing_location=PricingLocation.NYC)
defaults = tm._get_inflation_swap_leg_defaults(CurrEnum.USD)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.EUR, index_type='CPI-CPXTEMU',
pricing_location=PricingLocation.LDN)
defaults = tm._get_inflation_swap_leg_defaults(CurrEnum.EUR)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.GBP, index_type='CPI-UKRPI',
pricing_location=PricingLocation.LDN)
defaults = tm._get_inflation_swap_leg_defaults(CurrEnum.GBP)
assert result_dict == defaults
def test_get_inflation_swap_csa_terms():
valid_ccy = ['EUR', 'GBP', 'USD']
for ccy in valid_ccy:
assert dict(csaTerms=ccy + '-1') == tm._get_inflation_swap_csa_terms(ccy, tm.InflationIndexType.UKRPI.value)
def test_check_valid_indices():
valid_indices = ['UKRPI']
for index in valid_indices:
assert tm.InflationIndexType[index] == tm._check_inflation_index_type(CurrencyEnum.GBP, index)
invalid_indices = ['UKHPI', 'TestCPI']
for index in invalid_indices:
with pytest.raises(MqError):
tm._check_inflation_index_type(CurrencyEnum.GBP, index)
def test_get_tdapi_inflation_rates_assets(mocker):
mock_asset_1 = GsAsset(asset_class='Rates', id='MA26QSMPX9990G66', type_='InflationSwap', name='Test_asset')
mock_asset_2 = GsAsset(asset_class='Rates', id='MA44SBCHF192S6FR', type_='InflationSwap', name='Test_asset')
replace = Replacer()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1]
assert 'MA26QSMPX9990G66' == tm._get_tdapi_inflation_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict(asset_parameters_termination_date='5y', asset_parameters_effective_date='0b')
with pytest.raises(MqValueError):
tm._get_tdapi_inflation_rates_assets(**kwargs)
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = []
kwargs = dict(asset_parameters_clearing_house='NONE',
pricing_location='LDN')
with pytest.raises(MqValueError):
tm._get_tdapi_inflation_rates_assets(**kwargs)
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict()
assert ['MA26QSMPX9990G66', 'MA44SBCHF192S6FR'] == tm._get_tdapi_inflation_rates_assets(**kwargs)
replace.restore()
    # test case for matching a single asset against a full set of inflation swap query parameters
kwargs = dict(type='InflationSwap', asset_parameters_termination_date='5y',
asset_parameters_index=tm.InflationIndexType.UKRPI,
asset_parameters_clearing_house='None', asset_parameters_effective_date='5y',
asset_parameters_notional_currency='GBP',
pricing_location='LDN')
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1]
assert 'MA26QSMPX9990G66' == tm._get_tdapi_inflation_rates_assets(**kwargs)
replace.restore()
def mock_curr(_cls, _q):
d = {
'swapRate': [1, 2, 3],
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def test_inflation_swap_rate(mocker):
replace = Replacer()
args = dict(swap_tenor='5y', index_type='UKRPI', clearing_house='LCH', forward_tenor='5y', real_time=False)
mock_gbp = Currency('MA26QSMPX9990G66', 'GBP')
args['asset'] = mock_gbp
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'GBP'
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm.inflation_swap_rate(**args)
args['swap_tenor'] = '5y'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm.inflation_swap_rate(**args)
args['forward_tenor'] = '5y'
args['real_time'] = True
with pytest.raises(NotImplementedError):
tm.inflation_swap_rate(**args)
args['real_time'] = False
args['asset'] = Currency('MA666', 'AED')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'AED'
with pytest.raises(NotImplementedError):
tm.inflation_swap_rate(**args)
args['asset'] = mock_gbp
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'GBP'
args['index_type'] = tm.InflationIndexType.TESTCPI
with pytest.raises(MqValueError):
tm.inflation_swap_rate(**args)
args['index_type'] = tm.InflationIndexType.UKRPI
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'GBP'
identifiers = replace('gs_quant.timeseries.measures_inflation._get_tdapi_inflation_rates_assets', Mock())
identifiers.return_value = {'MA26QSMPX9990G66'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm.inflation_swap_rate(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='swapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == _test_datasets
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'EUR'
identifiers = replace('gs_quant.timeseries.measures_inflation._get_tdapi_inflation_rates_assets', Mock())
identifiers.return_value = {'MAZBW57ZPS54ET7K'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
args['asset'] = Currency('MAZBW57ZPS54ET7K', 'EUR')
args['index_type'] = 'FRCPXTOB'
args['location'] = PricingLocation.LDN
actual = tm.inflation_swap_rate(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='swapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_inflation_swap_term(mocker):
replace = Replacer()
args = dict(forward_tenor='1y', pricing_date='0d', clearing_house=_ClearingHouse.LCH,
real_time=False)
class ObjectView(object):
def __init__(self, d):
self.__dict__ = d
holidays = replace('gs_quant.datetime.GsCalendar.get', Mock(holidays=[]))
holidays.return_value = ObjectView({'holidays': []})
bd_calendar = replace('gs_quant.timeseries.measures_inflation._get_custom_bd', Mock())
bd_calendar.return_value = CustomBusinessDay()
pricing_date_mock = replace('gs_quant.timeseries.measures_inflation._range_from_pricing_date', Mock())
pricing_date_mock.return_value = [dt.date(2019, 1, 1), dt.date(2019, 1, 1)]
mock_nok = Currency('MA891', 'PLN')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'ACU'
args['asset'] = mock_nok
with pytest.raises(NotImplementedError):
tm.inflation_swap_term(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
args['real_time'] = True
with pytest.raises(NotImplementedError):
tm.inflation_swap_term(**args)
args['real_time'] = False
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm.inflation_swap_term(**args)
args['forward_tenor'] = '1y'
bd_mock = replace('gs_quant.data.dataset.Dataset.get_data', Mock())
bd_mock.return_value = pd.DataFrame(data=dict(date="2020-04-10", exchange="NYC", description="Good Friday"),
index=[pd.Timestamp('2020-04-10')])
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers_empty = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
identifiers_empty.return_value = {}
with pytest.raises(MqValueError):
tm.inflation_swap_term(**args)
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
mock_asset = Currency('USD', name='USD')
mock_asset.id = 'MAEMPCXQG3T716EX'
mock_asset.exchange = 'OTC'
identifiers.return_value = [mock_asset]
d = {
'terminationTenor': ['1y', '2y', '3y', '4y'], 'swapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
bd_mock.return_value = pd.DataFrame()
market_data_mock = replace('gs_quant.timeseries.measures_inflation._get_inflation_swap_data', Mock())
market_data_mock.return_value = pd.DataFrame()
df = pd.DataFrame(data=d, index=_index * 4)
actual = tm.inflation_swap_term(**args)
assert actual.empty
series_apply_mock = replace('gs_quant.timeseries.measures_inflation.pd.Series.apply', Mock())
series_apply_mock.return_value = pd.Series([dt.date(2022, 3, 30), dt.date(2023, 3, 30), dt.date(2024, 3, 30),
dt.date(2025, 3, 30)], index=df.index)
market_data_mock.return_value = df
with DataContext('2019-01-01', '2026-01-01'):
actual = tm.inflation_swap_term(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=[dt.date(2022, 3, 30), dt.date(2023, 3, 30), dt.date(2024, 3, 30),
dt.date(2025, 3, 30)])
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
args['location'] = PricingLocation.NYC
with DataContext('2019-01-01', '2026-01-01'):
tm.inflation_swap_term(**args)
holidays.return_value = ObjectView({'holidays': ['0d']})
with pytest.raises(MqValueError):
tm.inflation_swap_term(**args)
replace.restore()
if __name__ == '__main__':
pytest.main(args=["test_measures_inflation.py"])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License'); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import hmac
import logging
import json
from collections import OrderedDict
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
from dciauth.v2.time import get_now
logger = logging.getLogger(__name__)
TIMESTAMP_FORMAT = "%Y%m%dT%H%M%SZ"
DATESTAMP_FORMAT = "%Y%m%d"
def encode_data(data):
try:
return (data or "").encode("utf-8")
except (UnicodeDecodeError, AttributeError):
return data
def generate_headers(request, credentials):
access_key = credentials.get("access_key")
secret_key = credentials.get("secret_key")
logger.debug("Generate HMAC v2 headers for %s" % access_key)
if not access_key or not secret_key:
return {}
if "payload" in request:
payload = request.pop("payload")
request["data"] = json.dumps(payload)
request["data"] = encode_data(request.get("data"))
now = get_now()
request["timestamp"] = request.get("timestamp", now.strftime(TIMESTAMP_FORMAT))
request["datestamp"] = request.get("datestamp", now.strftime(DATESTAMP_FORMAT))
headers = {
"X-DCI-Date": request["timestamp"],
"Authorization": _build_authorization_header(request, access_key, secret_key),
}
logger.debug("Generated headers %s" % json.dumps(headers, indent=2, sort_keys=True))
return headers
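# Illustrative usage of generate_headers (the endpoint, params and credential
# values below are made up; only the call shape matters):
#
#     request = {"method": "GET", "endpoint": "/api/v1/jobs", "params": {"limit": 10}}
#     credentials = {"access_key": "remoteci/client_id", "secret_key": "secret"}
#     headers = generate_headers(request, credentials)
#     # e.g. requests.get("https://api.distributed-ci.io/api/v1/jobs",
#     #                   params=request["params"], headers=headers)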
def _build_authorization_header(request, access_key, secret_key):
string_to_sign = _get_string_to_sign(request)
signing_key = _get_signing_key(request, secret_key)
signature = hmac.new(signing_key, string_to_sign, hashlib.sha256).hexdigest()
# pylint: disable=line-too-long
return """{algorithm} Credential={access_key}/{credential_scope}, SignedHeaders={signed_headers}, Signature={signature}""".format(
algorithm=_get_algorithm(request),
access_key=access_key,
credential_scope=_get_credential_scope(request),
signed_headers=_get_signed_headers(request),
signature=signature,
)
# pylint: enable=line-too-long
def _get_string_to_sign(request):
string_to_sign = """{algorithm}
{timestamp}
{credential_scope}
{canonical_request}""".format(
algorithm=_get_algorithm(request),
timestamp=request["timestamp"],
credential_scope=_get_credential_scope(request),
canonical_request=_get_canonical_request(request),
)
logger.debug("String to sign %s" % string_to_sign)
return string_to_sign.encode("utf-8")
def _get_canonical_request(request):
canonical_request = """{method}
{endpoint}
{canonical_querystring}
{canonical_headers}
{signed_headers}
{payload_hash}""".format(
method=request.get("method", "GET"),
endpoint=request.get("endpoint", "/"),
canonical_querystring=_get_canonical_querystring(request),
canonical_headers=_get_canonical_headers(request),
signed_headers=_get_signed_headers(request),
payload_hash=_get_payload_hash(request),
)
logger.debug("Canonical request %s" % canonical_request)
return hashlib.sha256(canonical_request.encode("utf-8")).hexdigest()
def _get_canonical_querystring(request):
params = request.get("params")
return urlencode(_order_dict(params)) if params else ""
def _get_payload_hash(request):
data = request.get("data")
return hashlib.sha256(data).hexdigest()
def _get_canonical_headers(request):
canonical_headers = request.get(
"canonical_headers",
{
"host": request.get("host", "api.distributed-ci.io"),
"x-dci-date": request["timestamp"],
},
)
signed_headers = _get_signed_headers(request)
return (
"\n".join(
["%s:%s" % (h, canonical_headers[h]) for h in signed_headers.split(";")]
)
+ "\n"
)
def _get_credential_scope(request):
return """{datestamp}/{region}/{service}/{request_type}""".format(
datestamp=request["datestamp"],
region=_get_region(request),
service=_get_service(request),
request_type=_get_request_type(request),
)
def _get_signing_key(request, key):
algorithm = _get_algorithm(request)
algo_version = algorithm.replace("-HMAC-SHA256", "")
datestamp = request["datestamp"]
key_date = _sign((algo_version + key).encode("utf-8"), datestamp)
region = _get_region(request)
key_region = _sign(key_date, region)
service = _get_service(request)
key_service = _sign(key_region, service)
request_type = _get_request_type(request)
return _sign(key_service, request_type)
def _sign(key, msg):
return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()
def _get_signed_headers(request):
return request.get("signed_headers", "host;x-dci-date")
def _get_algorithm(request):
return request.get("algorithm", "DCI2-HMAC-SHA256")
def _get_region(request):
return request.get("region", "BHS3")
def _get_service(request):
return request.get("service", "api")
def _get_request_type(request):
return request.get("request_type", "dci2_request")
def _order_dict(dictionary):
return OrderedDict(sorted(dictionary.items(), key=lambda k: k[0]))
def _lower(headers):
return {key.lower(): value for key, value in headers.items()}
def parse_headers(headers):
headers = _lower(headers)
timestamp = _parse_timestamp(headers)
authorization = headers.get("authorization")
if not timestamp or not authorization:
return None
algorithm, credential, signed_headers, signature = authorization.split(" ")
signature = signature.replace("Signature=", "")
credential = _find_in_str_between(credential, "Credential=", ",").split("/")
if len(credential) != 6:
return None
signed_headers = _find_in_str_between(signed_headers, "SignedHeaders=", ",")
parsed_headers = {
"host": headers.get("host"),
"algorithm": algorithm,
"client_type": credential[0],
"client_id": credential[1],
"datestamp": credential[2],
"region": credential[3],
"service": credential[4],
"request_type": credential[5],
"signed_headers": signed_headers,
"canonical_headers": {h: headers[h] for h in signed_headers.split(";")},
"timestamp": timestamp,
"signature": signature,
}
logger.debug(
"Parsed headers %s" % json.dumps(parsed_headers, indent=2, sort_keys=True)
)
return parsed_headers
def _parse_timestamp(headers):
aws_date_header = "x-amz-date"
dci_date_header = "x-dci-date"
if aws_date_header not in headers and dci_date_header not in headers:
return None
return (
headers[aws_date_header]
if aws_date_header in headers
else headers[dci_date_header]
)
def _find_in_str_between(string, first, last):
try:
start = string.index(first) + len(first)
end = string.index(last, start)
return string[start:end]
except ValueError:
return ""
|
__all__ = [
'FileLoadingException',
'NodeRemovalException',
'NodeReferenceException',
]
class FileLoadingException(Exception):
pass
class NodeRemovalException(Exception):
pass
class NodeReferenceException(Exception):
pass
|
# Generated by Django 2.1.2 on 2019-03-27 09:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wiki', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='documentversion',
name='changed_time',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='documentversion',
name='document',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='versions', to='wiki.Document'),
),
migrations.AlterUniqueTogether(
name='document',
unique_together={('context', 'name')},
),
]
|
# B2053 - solve the quadratic equation a*x^2 + b*x + c = 0
a, b, c = map(float, input().split())
delta = (b ** 2 - 4 * a * c)
if delta < 0:
    print('No answer!')
elif delta == 0:
    x = (-b + delta ** 0.5) / (2 * a)
    print('x1=x2=' + '%.5f' % x)
else:
    x1 = (-b + delta ** 0.5) / (2 * a)
    x2 = (-b - delta ** 0.5) / (2 * a)
    if x1 > x2:
        x1, x2 = x2, x1
    print('x1=' + '%.5f' % x1 + ';x2=' + '%.5f' % x2)
|
# Title : Linked list implementation in python
# Author : Kiran raj R.
# Date : 30:10:2020
class Node:
"""Create a node with value provided, the pointer to next is set to None"""
def __init__(self, value):
self.value = value
self.next = None
class Simply_linked_list:
"""create a empty singly linked list """
def __init__(self):
self.head = None
def printList(self):
temp = self.head
while(temp):
print(temp.value)
temp = temp.next
sl_list = Simply_linked_list()
sl_list.head = Node(1)
node2 = Node(2)
node3 = Node(3)
sl_list.head.next = node2
node2.next = node3
sl_list.printList()
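# A minimal append helper (not part of the original demo): it walks to the tail of
# a Simply_linked_list and attaches the new node there, so the manual pointer
# wiring above could also be written as append_node(sl_list, Node(2)), etc.
def append_node(linked_list, node):
    """Attach node at the end of linked_list (assumes a non-circular list)."""
    if linked_list.head is None:
        linked_list.head = node
        return
    tail = linked_list.head
    while tail.next:
        tail = tail.next
    tail.next = node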
|
#!/usr/bin/env python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from codecs import open
from os import path
import sys
import re
import csv
import os
# OptionParser imports
from optparse import OptionParser
from optparse import OptionGroup
# Python 2 and 3 compatibility
if (sys.version_info < (3, 0)):
fd_read_options = 'rb'
fd_write_options = 'wb'
else:
fd_read_options = 'r'
fd_write_options = 'w'
# Handy patterns
# -- Entering address definition block
p_entering_address_block = re.compile(r'^\s*config firewall address$', re.IGNORECASE)
p_entering_subaddress_block = re.compile(r'^\s*config .*$', re.IGNORECASE)
# -- Exiting address definition block
p_exiting_address_block = re.compile(r'^end$', re.IGNORECASE)
# -- Committing the current address definition and going to the next one
p_address_next = re.compile(r'^next$', re.IGNORECASE)
# -- address number
p_address_number = re.compile(r'^\s*edit\s+(?P<address_number>\S*)', re.IGNORECASE)
# -- address setting
p_address_set = re.compile(r'^\s*set\s+(?P<address_key>\S+)\s+(?P<address_value>.*)$', re.IGNORECASE)
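# Illustrative snippet of the FortiGate configuration block the patterns above are
# meant to match (names and values are made up):
#
#     config firewall address
#         edit "internal-net"
#             set type ipmask
#             set subnet 10.0.0.0 255.255.255.0
#         next
#     end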
# Functions
def parse(options,full_path):
"""
    Parse the data according to several regexes
    @param options: parsed command-line options
    @param full_path: path to the configuration file, used when options.input_file is not set
    @rtype: return a list of addresses ( [ {'id' : 'addr1', 'type' : 'ipmask', ...}, {'id' : 'addr2', 'subnet' : '10.0.0.0 255.255.255.0', ...}, ... ] )
    and the list of unique seen keys ['id', 'type', 'subnet', ...]
"""
global p_entering_address_block, p_exiting_address_block, p_address_next, p_address_number, p_address_set
in_address_block = False
address_list = []
address_elem = {}
order_keys = []
if (options.input_file != None):
with open(options.input_file, mode=fd_read_options) as fd_input:
for line in fd_input:
line = line.strip()
                # We match an address block
if p_entering_address_block.search(line):
in_address_block = True
                # We are in an address block
if in_address_block:
if p_address_number.search(line):
address_number = p_address_number.search(line).group('address_number')
address_number = re.sub('["]', '', address_number)
address_elem['id'] = address_number
if not('id' in order_keys):
order_keys.append('id')
# We match a setting
if p_address_set.search(line):
address_key = p_address_set.search(line).group('address_key')
if not(address_key in order_keys):
order_keys.append(address_key)
address_value = p_address_set.search(line).group('address_value').strip()
address_value = re.sub('["]', '', address_value)
address_elem[address_key] = address_value
# We are done with the current address id
if p_address_next.search(line):
address_list.append(address_elem)
address_elem = {}
# We are exiting the address block
if p_exiting_address_block.search(line):
in_address_block = False
return (address_list, order_keys)
else:
#for files in os.listdir(os.path.abspath(options.input_folder)):
with open(full_path, mode=fd_read_options) as fd_input:
for line in fd_input:
line = line.strip()
                # We match an address block
if p_entering_address_block.search(line):
in_address_block = True
                # We are in an address block
if in_address_block:
if p_address_number.search(line):
                        address_number = p_address_number.search(line).group('address_number')
                        address_number = re.sub('["]', '', address_number)
address_elem['id'] = address_number
if not('id' in order_keys):
order_keys.append('id')
# We match a setting
if p_address_set.search(line):
address_key = p_address_set.search(line).group('address_key')
if not(address_key in order_keys):
order_keys.append(address_key)
address_value = p_address_set.search(line).group('address_value').strip()
address_value = re.sub('["]', '', address_value)
address_elem[address_key] = address_value
# We are done with the current address id
if p_address_next.search(line):
address_list.append(address_elem)
address_elem = {}
# We are exiting the address block
if p_exiting_address_block.search(line):
in_address_block = False
return (address_list, order_keys)
|
# Copyright (C) 2014 SignalFuse, Inc.
# Copyright (C) 2015 SignalFx, Inc.
# Maestro extension that can be used for wrapping the execution of a
# long-running service with log management scaffolding, potentially sending the
# log output to a file, or through Pipestash, or both, depending on the use
# case and parameters.
import os
import random
import signal
import subprocess
from ...guestutils import get_container_name, get_service_name, get_node_list
def run_service(cmd, logtype='log', logbase=None, logtarget=None):
"""Wrap the execution of a service with the necessary logging nets.
If logbase is provided (it is by default), log output will be redirected
(or teed) to a file named after the container executing the service inside
the logbase directory.
If Redis nodes are available in the environment as referenced by the given
logtarget, log output will be streamed via pipestash to one of the
available node containers, chosen at random when the service starts.
    The way this is accomplished varies depending on whether logbase is provided
    and whether Redis nodes are available:
and whether Redis nodes are available:
- if neither, log output flows to stdout and will be captured by
Docker;
- if logbase is provided, but no Redis nodes are available, the
output of the service is directly redirected to the log file;
- if logbase is not provided, but Redis nodes are available, the
output of the service is piped to pipestash;
- if logbase is provided and Redis nodes are available, the output
of the service is piped to a tee that will write the log file, and
the output of the tee is piped to pipestash.
The whole pipeline, whatever its construct is, waits for the service to
terminate. SIGTERM is also redirected from the parent to the service.
"""
if type(cmd) == str:
cmd = cmd.split(' ')
log = logbase \
and os.path.join(logbase, '{}.log'.format(get_container_name())) \
or None
if logbase and not os.path.exists(logbase):
os.makedirs(logbase)
redis = logtarget \
and get_node_list(logtarget, ports=['redis'], minimum=0) \
or None
stdout = redis and subprocess.PIPE or (log and open(log, 'w+') or None)
# Start the service with the provided command.
service = subprocess.Popen(cmd, stdout=stdout,
stderr=subprocess.STDOUT)
last = service
# Connect SIGTERM to the service process.
signal.signal(signal.SIGTERM, lambda signum, frame: service.terminate())
if redis:
if log:
# Tee to a local log file.
tee = subprocess.Popen(['tee', log], stdin=last.stdout,
stdout=subprocess.PIPE)
last.stdout.close()
last = tee
pipestash = subprocess.Popen(
['pipestash', '-t', logtype,
'-r', 'redis://{}/0'.format(random.choice(redis)),
'-R', 'logstash',
'-f', 'service={}'.format(get_service_name()),
'-S', get_container_name()],
stdin=last.stdout)
last.stdout.close()
last = pipestash
# Wait for the service to exit and return its return code.
last.communicate()
return service.wait()
|
# Build a confusion matrix from the ground-truth and prediction value txt files
import numpy as np
def mixmatrix():
    # Initialisation: matrix a accumulates the counts (7 classes, labelled 0-6)
    a = np.zeros((7, 7), dtype=int)
    with open('predvalue.txt', mode='r') as f_pred, \
            open('truevalue.txt', mode='r') as f_true:
        lines_t = f_true.readlines()
        lines_p = f_pred.readlines()
        for i in range(len(lines_t)):
            # labels are single digits (0-6), so the first character is the class id
            tt = int(lines_t[i][0])
            pp = int(lines_p[i][0])
            a[tt][pp] += 1
    print(a)
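# Entry point for running the script directly (assumes predvalue.txt and
# truevalue.txt are in the current working directory).
if __name__ == '__main__':
    mixmatrix()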
|
import tempfile
import os
from PIL import Image
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Recipe, Tag, Ingredient
from recipe.serializers import RecipeSerializer, RecipeDetailSerializer
RECIPES_URL = reverse( 'recipe:recipe-list' )
def image_upload_url( recipe_id ):
"""Return URL for recipe image upload"""
return reverse( 'recipe:recipe-upload-image', args=[ recipe_id ] )
def detail_url( recipe_id ):
"""Return recipe detail URL"""
return reverse( 'recipe:recipe-detail', args=[ recipe_id ] )
def sample_tag( user, name='Main course' ):
"""Create and return a sample tag"""
return Tag.objects.create( user=user, name=name )
def sample_ingredient( user, name='Cinnamon' ):
"""Create and return a sample ingredient"""
return Ingredient.objects.create( user=user, name=name )
def sample_recipe( user, **params ):
"""Creating a sample recipe"""
defaults = {
'title': 'Sample recipe',
'time_minutes': 10,
'price': 5.00
}
defaults.update( params )
return Recipe.objects.create( user=user, **defaults )
class PublicRecipeApiTests( TestCase ):
"""Test unauthenticated recipe API access"""
def setUp( self ):
self.client = APIClient()
def test_auth_required( self ):
"""Test that authentication is required"""
res = self.client.get( RECIPES_URL )
self.assertEqual( res.status_code, status.HTTP_401_UNAUTHORIZED )
class PrivateRecipeApiTests( TestCase ):
"""Test unauthenticated recipe API access"""
def setUp( self ):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'test@londonappdev.com',
'testpass'
)
self.client.force_authenticate( self.user )
def test_retrieve_recipes( self ):
"""Test retrieving a list of recipes"""
sample_recipe( user=self.user )
sample_recipe( user=self.user )
res = self.client.get( RECIPES_URL )
recipes = Recipe.objects.all().order_by('-id')
serializer = RecipeSerializer( recipes, many=True )
self.assertEqual( res.status_code, status.HTTP_200_OK )
self.assertEqual( res.data, serializer.data )
def test_recipes_limited_to_user( self ):
"""Test retrieving recipes for user"""
user2 = get_user_model().objects.create_user(
'other@londonappdev.com',
'password123'
)
sample_recipe( user=user2 )
sample_recipe( user=self.user )
res = self.client.get( RECIPES_URL )
recipes = Recipe.objects.filter( user=self.user )
serializer = RecipeSerializer( recipes, many=True )
self.assertEqual( res.status_code, status.HTTP_200_OK )
self.assertEqual( len( res.data ), 1 )
self.assertEqual( res.data, serializer.data )
def test_view_recipe_detail( self ):
"""Test viewing a recipe detail"""
recipe = sample_recipe( user=self.user )
recipe.tags.add( sample_tag( user=self.user ) )
recipe.ingredients.add( sample_ingredient( user=self.user ) )
url = detail_url( recipe.id )
res = self.client.get( url )
serializer = RecipeDetailSerializer( recipe )
self.assertEqual( res.data, serializer.data )
def test_create_basic_recipe( self ):
"""Test creating recipe"""
payload = {
'title': 'Chocolate cheesecake',
'time_minutes': 30,
'price': 5.00
}
res = self.client.post( RECIPES_URL, payload )
self.assertEqual( res.status_code, status.HTTP_201_CREATED )
recipe = Recipe.objects.get( id = res.data[ 'id' ] )
for key in payload.keys() :
self.assertEqual( payload[ key ], getattr( recipe, key ) )
def test_create_recipe_with_tags( self ):
"""Test creating a recipe with tags"""
tag1 = sample_tag(
user = self.user,
name = 'Vegan'
)
tag2 = sample_tag(
user = self.user,
name = 'Dessert'
)
payload = {
            'title': 'Avocado lime cheesecake',
'tags': [ tag1.id, tag2.id ],
'time_minutes': 60,
'price': 20.00
}
res = self.client.post( RECIPES_URL, payload )
self.assertEqual( res.status_code, status.HTTP_201_CREATED )
recipe = Recipe.objects.get(
id = res.data[ 'id' ]
)
tags = recipe.tags.all()
self.assertEqual( tags.count(), 2 )
self.assertIn( tag1, tags )
self.assertIn( tag2, tags )
def test_create_recipe_with_ingredients( self ):
"""Test creating recipe with ingredients"""
ingredient1 = sample_ingredient(
user = self.user,
name = 'Prawns'
)
ingredient2 = sample_ingredient(
user = self.user,
name = 'Ginger'
)
payload = {
            'title' : 'Thai prawns red curry',
'ingredients' : [ ingredient1.id, ingredient2.id ],
'time_minutes' : 20,
'price' : 7.00
}
res = self.client.post( RECIPES_URL, payload )
self.assertEqual( res.status_code, status.HTTP_201_CREATED )
recipe = Recipe.objects.get( id = res.data[ 'id' ] )
ingredients = recipe.ingredients.all()
self.assertEqual( ingredients.count(), 2 )
self.assertIn( ingredient1, ingredients )
self.assertIn( ingredient2, ingredients )
def test_partial_update_recipe( self ):
"""Test updating a recipe with patch"""
recipe = sample_recipe( user = self.user )
recipe.tags.add( sample_tag( user = self.user ) )
new_tag = sample_tag( user = self.user, name = 'Curry' )
payload = {
'title': 'Chicken tikka',
'tags': [ new_tag.id ]
}
url = detail_url( recipe.id )
self.client.patch( url, payload )
recipe.refresh_from_db()
self.assertEqual( recipe.title, payload[ 'title' ] )
tags = recipe.tags.all()
self.assertEqual( len(tags), 1 )
self.assertIn( new_tag, tags )
def test_full_update_recipe( self ):
"""Test updating a recipe with put"""
recipe = sample_recipe( user = self.user )
recipe.tags.add( sample_tag( user = self.user ) )
payload = {
'title': 'Spaghetti carbonara',
'time_minutes': 25,
'price': 5.00
}
url = detail_url( recipe.id )
self.client.put( url, payload )
recipe.refresh_from_db()
self.assertEqual( recipe.title, payload[ 'title' ] )
self.assertEqual( recipe.time_minutes, payload[ 'time_minutes' ] )
self.assertEqual( recipe.price, payload[ 'price' ] )
tags = recipe.tags.all()
self.assertEqual( len(tags), 0 )
class RecipeImageUploadTests( TestCase ):
"""Testing the image upload for recipe model"""
def setUp( self ):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'user@londonappdev.com',
'testpass'
)
self.client.force_authenticate( self.user )
self.recipe = sample_recipe( user=self.user )
def tearDown( self ):
self.recipe.image.delete()
def test_upload_image_to_recipe( self ):
"""Test uploading an image to recipe"""
url = image_upload_url( self.recipe.id )
with tempfile.NamedTemporaryFile( suffix='.jpg' ) as ntf:
img = Image.new( 'RGB', ( 10, 10 ) )
img.save( ntf, format='JPEG' )
ntf.seek( 0 )
res = self.client.post( url, { 'image': ntf }, format='multipart' )
self.recipe.refresh_from_db()
self.assertEqual( res.status_code, status.HTTP_200_OK )
self.assertIn( 'image', res.data )
self.assertTrue( os.path.exists( self.recipe.image.path ) )
def test_upload_image_bad_request( self ):
"""Test uploading an invalid image"""
url = image_upload_url( self.recipe.id )
res = self.client.post( url, { 'image': 'notimage' }, format='multipart' )
self.assertEqual( res.status_code, status.HTTP_400_BAD_REQUEST )
def test_filter_recipes_by_tags( self ):
"""Test returning recipes with specific tags"""
recipe1 = sample_recipe( user=self.user, title='Thai vegetable curry' )
recipe2 = sample_recipe( user=self.user, title='Aubergine with tahini' )
tag1 = sample_tag( user=self.user, name='Vegan' )
tag2 = sample_tag( user=self.user, name='Vegetarian' )
recipe1.tags.add( tag1 )
recipe2.tags.add( tag2 )
recipe3 = sample_recipe( user=self.user, title='Fish and chips' )
res = self.client.get(
RECIPES_URL,
{ 'tags': f'{tag1.id},{tag2.id}' }
)
serializer1 = RecipeSerializer( recipe1 )
serializer2 = RecipeSerializer( recipe2 )
serializer3 = RecipeSerializer( recipe3 )
self.assertIn( serializer1.data, res.data )
self.assertIn( serializer2.data, res.data )
self.assertNotIn( serializer3.data, res.data )
def test_filter_recipes_by_ingredients( self ):
"""Test returning recipes with specific ingredients"""
        recipe1 = sample_recipe( user=self.user, title='Posh beans on toast' )
recipe2 = sample_recipe( user=self.user, title='Chicken cacciatore' )
ingredient1 = sample_ingredient( user=self.user, name='Feta cheese' )
ingredient2 = sample_ingredient( user=self.user, name='Chicken' )
recipe1.ingredients.add( ingredient1 )
recipe2.ingredients.add( ingredient2 )
recipe3 = sample_recipe( user=self.user, title='Steak and mushrooms' )
res = self.client.get(
RECIPES_URL,
{ 'ingredients': f'{ingredient1.id},{ingredient2.id}' }
)
serializer1 = RecipeSerializer( recipe1 )
serializer2 = RecipeSerializer( recipe2 )
serializer3 = RecipeSerializer( recipe3 )
self.assertIn( serializer1.data, res.data )
self.assertIn( serializer2.data, res.data )
self.assertNotIn( serializer3.data, res.data )
|
"""train a handwritten digit classifier."""
from typing import List, Mapping, Tuple
import jax
import jax.numpy as jnp
import opax
import pax
import tensorflow_datasets as tfds
from tqdm.auto import tqdm
Batch = Mapping[str, jnp.ndarray]
class ConvNet(pax.Module):
"""ConvNet module."""
layers: List[Tuple[pax.Conv2D, pax.BatchNorm2D]]
output: pax.Conv2D
def __init__(self):
super().__init__()
self.layers = []
for i in range(5):
conv = pax.Conv2D((1 if i == 0 else 32), 32, 6, padding="VALID")
batchnorm = pax.BatchNorm2D(32, True, True, 0.9)
self.layers.append((conv, batchnorm))
self.output = pax.Conv2D(32, 10, 3, padding="VALID")
def __call__(self, x: jnp.ndarray):
for conv, batchnorm in self.layers:
x = batchnorm(conv(x))
x = jax.nn.relu(x)
x = self.output(x)
return jnp.squeeze(x, (1, 2))
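# Shape walk-through for a (N, 28, 28, 1) MNIST batch (illustrative, assuming the
# default stride of 1): the five 6x6 VALID convolutions reduce 28 -> 23 -> 18 -> 13
# -> 8 -> 3 spatially, the final 3x3 VALID convolution gives (N, 1, 1, 10), and the
# squeeze over axes (1, 2) yields (N, 10) logits.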
def loss_fn(model: ConvNet, batch: Batch):
x = batch["image"].astype(jnp.float32) / 255
target = batch["label"]
model, logits = pax.module_and_value(model)(x)
log_pr = jax.nn.log_softmax(logits, axis=-1)
log_pr = jnp.sum(jax.nn.one_hot(target, log_pr.shape[-1]) * log_pr, axis=-1)
loss = -jnp.mean(log_pr)
return loss, model
@jax.jit
def test_loss_fn(model: ConvNet, batch: Batch):
model = model.eval()
return loss_fn(model, batch)[0]
@jax.jit
def train_step(model: ConvNet, optimizer: opax.GradientTransformation, batch: Batch):
(loss, model), grads = pax.value_and_grad(loss_fn, has_aux=True)(model, batch)
params = model.parameters()
updates, optimizer = opax.transform_gradients(grads, optimizer, params=params)
new_params = opax.apply_updates(params, updates=updates)
model = model.update_parameters(new_params)
return model, optimizer, loss
def load_dataset(split: str):
"""Loads the dataset as a tensorflow dataset."""
dataset = tfds.load("mnist:3.*.*", split=split)
return dataset
def train(
batch_size=32,
num_epochs=10,
learning_rate=1e-4,
weight_decay=1e-4,
):
# seed random key
pax.seed_rng_key(42)
# model & optimizer
net = ConvNet()
print(net.summary())
optimizer = opax.chain(
opax.clip_by_global_norm(1.0),
opax.adamw(learning_rate=learning_rate, weight_decay=weight_decay),
).init(net.parameters())
# data
train_data = load_dataset("train").shuffle(10 * batch_size).batch(batch_size)
test_data = load_dataset("test").shuffle(10 * batch_size).batch(batch_size)
# training loop
for epoch in range(num_epochs):
losses, global_norm = 0.0, 0.0
for batch in tqdm(train_data, desc="train", leave=False):
batch = jax.tree_map(lambda x: x.numpy(), batch)
net, optimizer, loss = train_step(net, optimizer, batch)
losses = losses + loss
global_norm = global_norm + optimizer[0].global_norm
loss = losses / len(train_data)
global_norm = global_norm / len(train_data)
test_losses = 0.0
for batch in tqdm(test_data, desc="eval", leave=False):
batch = jax.tree_map(lambda x: x.numpy(), batch)
test_losses = test_losses + test_loss_fn(net, batch)
test_loss = test_losses / len(test_data)
print(
"[Epoch %d] train loss %.3f test loss %.3f global norm %.3f"
% (epoch, loss, test_loss, global_norm)
)
return net
if __name__ == "__main__":
train()
|
from enum import IntEnum
class TriggerCategory(IntEnum):
Undefined = -1
    Cause = 0
    Condition = 1
    Area = 3
    Filter = 4
Effect = 5
|
#Code for training the model
# WRITTEN BY:
# John Torr (john.torr@cantab.net)
#standard library imports
import argparse
import copy
import os
from numpy import float64
from time import strftime
from typing import DefaultDict, List, Tuple
#third party imports
import torch.optim as optim
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from torch.nn import CrossEntropyLoss
from torchtext.data.iterator import BucketIterator, Iterator
#local imports
from config import *
from constants import *
from file_handler import *
from lstm_model import LSTMTagger
from prepare_data import *
from utils import *
evalModelReturn = Tuple[float64, float, float64, float64, float64, float64, float64, float64]
models_folder = 'models'
device = torch.device("cuda:0" if (torch.cuda.is_available() and use_cuda_if_available) else "cpu")
def main(data_path: str, saved_model_path: str) -> None:
"""The main training function"""
if saved_model_path:
global embedding_dim, char_embedding_dim, hidden_dim, char_hidden_dim, use_bert_cased, \
use_bert_uncased, use_bert_large
embedding_dim, char_embedding_dim, hidden_dim, char_hidden_dim, use_bert_cased, use_bert_uncased, \
use_bert_large = load_hyper_params(saved_model_path)
if use_bert_uncased or use_bert_cased:
use_bert = True
else:
use_bert = False
if use_bert:
train_iter, \
val_iter, \
word_to_ix, \
ix_to_word, \
tag_vocab, \
char_to_ix = create_bert_datasets(
data_path=data_path,
mode=TRAIN,
use_bert_cased=use_bert_cased,
use_bert_uncased=use_bert_uncased,
use_bert_large=use_bert_large
)
vocab_size = None
word_vocab = None
else:
train_iter, \
val_iter, \
word_vocab, \
tag_vocab, \
char_to_ix = create_datasets(data_path=data_path, mode=TRAIN)
        # char_to_ix is automatically extended with any characters (e.g. < >) encountered during evaluation, but we want
        # to save the original mapping so that the char embedding parameters can be recomputed later, hence the deep copies below.
word_to_ix, ix_to_word = word_vocab.stoi, word_vocab.itos
vocab_size = len(word_to_ix)
tag_to_ix, ix_to_tag = tag_vocab.stoi, tag_vocab.itos
char_to_ix_original = copy.deepcopy(char_to_ix)
word_vocab_original = copy.deepcopy(word_vocab)
word_to_ix_original = copy.deepcopy(word_to_ix)
ix_to_word_original = copy.deepcopy(ix_to_word)
tag_vocab_original = copy.deepcopy(tag_vocab)
model = LSTMTagger(
embedding_dim=embedding_dim,
hidden_dim=hidden_dim,
vocab_size=vocab_size,
tagset_size=len(tag_to_ix),
char_embedding_dim=char_embedding_dim,
char_hidden_dim=char_hidden_dim,
char_vocab_size=len(char_to_ix),
use_bert_cased=use_bert_cased,
use_bert_uncased=use_bert_uncased,
use_bert_large=use_bert_large
)
loss_function = CrossEntropyLoss(ignore_index=tag_to_ix['<pad>'])
model.to(device)
optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
if models_folder not in os.listdir(".."):
os.mkdir(os.path.join("..", models_folder))
if saved_model_path:
av_train_losses, \
av_eval_losses, \
checkpoint_epoch, \
best_accuracy, \
lowest_av_eval_loss, \
best_micro_precision, \
best_micro_recall, \
best_micro_f1, \
best_weighted_macro_precision, \
best_weighted_macro_recall, \
best_weighted_macro_f1 = load_model(model=model,
saved_model_path=saved_model_path,
optimizer=optimizer)
model_file_name = os.path.split(saved_model_path)[1]
else:
checkpoint_epoch = 0
av_train_losses = []
av_eval_losses = []
lowest_av_eval_loss = 999999
model_file_name = strftime("%Y_%m_%d_%H_%M_%S.pt")
#torch.autograd.set_detect_anomaly(True)
print("training..\n")
model.train()
start_epoch = checkpoint_epoch+1
end_epoch = checkpoint_epoch+num_epochs
    for epoch in range(start_epoch, end_epoch+1):
model.train()
print('===============================')
print('\n======== Epoch {} / {} ========'.format(epoch, end_epoch))
batch_num = 0
train_losses = []
for batch in train_iter:
batch_num += 1
if batch_num % 20 == 0 or batch_num == 1:
if batch_num != 1:
print("\nAverage Training loss for epoch {} at end of batch {}: {}".format(epoch, str(batch_num-1),sum(train_losses)/len(train_losses)))
print('\n======== at batch {} / {} ========'.format(batch_num, len(train_iter)))
model.zero_grad()
if use_bert:
sentences_in, attention_masks, token_start_idx, targets, original_sentences = batch
sentences_in = sentences_in.to(device)
attention_masks = attention_masks.to(device)
targets = targets.to(device)
max_length = (attention_masks != 0).max(0)[0].nonzero()[-1].item()+1
if max_length < sentences_in.shape[1]:
sentences_in = sentences_in[:, :max_length]
attention_masks = attention_masks[:, :max_length]
sent_batch_size = sentences_in.shape[0]
original_sentences_split = [sent.split() for sent in original_sentences]
word_batch_size = max([len(sent) for sent in original_sentences_split])
sent_lengths = [item for item in map(len, token_start_idx)]
else:
word_batch_size = batch.sentence.shape[0]
sent_batch_size = batch.sentence.shape[1]
sentences_in = batch.sentence.permute(1, 0).to(device)
targets = batch.tags.permute(1, 0).reshape(sent_batch_size * word_batch_size).to(device)
attention_masks = None
token_start_idx = None
original_sentences_split = None
sent_lengths = train_iter.sent_lengths[batch_num - 1]
words_in = get_words_in(
sentences_in=sentences_in,
char_to_ix=char_to_ix,
ix_to_word=ix_to_word,
device=device,
original_sentences_split=original_sentences_split
)
model.init_hidden(sent_batch_size=sent_batch_size, device=device)
tag_logits = model(
sentences=sentences_in,
words=words_in,
char_hidden_dim=char_hidden_dim,
sent_lengths=sent_lengths,
word_batch_size=word_batch_size,
device=device,
attention_masks=attention_masks,
token_start_idx=token_start_idx
)
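            # Divide the loss by the number of non-padding targets
            # (index 1 is taken here to be the padding index in the tag vocabulary).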
mask = targets != 1
loss = loss_function(tag_logits, targets)
loss /= mask.float().sum()
train_losses.append(loss.item())
loss.backward()
optimizer.step()
av_train_losses.append(sum(train_losses) / len(train_losses))
accuracy, av_eval_loss, micro_precision, micro_recall, micro_f1, weighted_macro_precision, \
weighted_macro_recall, weighted_macro_f1 = eval_model(
model=model,
loss_function=loss_function,
val_iter=val_iter,
char_to_ix=char_to_ix,
ix_to_word=ix_to_word,
ix_to_tag=ix_to_tag,
av_eval_losses=av_eval_losses,
use_bert=use_bert
)
print_results(epoch, accuracy, av_eval_loss, micro_precision, micro_recall, micro_f1, weighted_macro_precision, weighted_macro_recall, weighted_macro_f1)
if av_eval_losses[-1] < lowest_av_eval_loss:
lowest_av_eval_loss = av_eval_losses[-1]
best_accuracy, \
best_micro_precision, \
best_micro_recall, \
best_micro_f1, \
best_weighted_macro_precision, \
best_weighted_macro_recall, \
best_weighted_macro_f1 = accuracy, \
micro_precision, \
micro_recall, \
micro_f1, \
weighted_macro_precision, \
weighted_macro_recall, \
weighted_macro_f1
checkpoint_epoch = epoch
save_model(
epoch=checkpoint_epoch,
model=model,
optimizer=optimizer,
av_train_losses=av_train_losses,
av_eval_losses=av_eval_losses,
model_file_name=model_file_name,
word_to_ix=word_to_ix_original,
ix_to_word=ix_to_word_original,
word_vocab=word_vocab_original,
tag_vocab=tag_vocab_original,
char_to_ix=char_to_ix_original,
models_folder=models_folder,
embedding_dim=embedding_dim,
char_embedding_dim=char_embedding_dim,
hidden_dim=hidden_dim,
char_hidden_dim=char_hidden_dim,
accuracy=best_accuracy,
av_eval_loss=lowest_av_eval_loss,
micro_precision=best_micro_precision,
micro_recall=best_micro_recall,
micro_f1=best_micro_f1,
weighted_macro_precision=best_weighted_macro_precision,
weighted_macro_recall=best_weighted_macro_recall,
weighted_macro_f1=best_weighted_macro_f1,
use_bert_cased=use_bert_cased,
use_bert_uncased=use_bert_uncased,
use_bert_large=use_bert_large
)
print_results(
epoch=checkpoint_epoch,
accuracy=best_accuracy,
av_eval_loss=lowest_av_eval_loss,
micro_precision=best_micro_precision,
micro_recall=best_micro_recall,
micro_f1=best_micro_f1,
weighted_macro_precision=best_weighted_macro_precision,
weighted_macro_recall=best_weighted_macro_recall,
weighted_macro_f1=best_weighted_macro_f1,
final=True
)
plot_train_eval_loss(av_train_losses, av_eval_losses)
def eval_model(
model: LSTMTagger,
loss_function: CrossEntropyLoss,
val_iter: BucketIterator,
char_to_ix: DefaultDict[str, int],
ix_to_word: List[str],
ix_to_tag: List[str],
av_eval_losses: List[str],
use_bert: bool
) -> evalModelReturn:
"""
Function for evaluating the model being trained.
"""
model.eval()
y_pred = []
y_true = []
print("\nEvaluating model...")
with torch.no_grad():
batch_num = 0
eval_losses = []
for batch in val_iter:
batch_num += 1
if use_bert:
sentences_in, attention_masks, token_start_idx, targets, original_sentences = batch
sentences_in = sentences_in.to(device)
attention_masks = attention_masks.to(device)
targets = targets.to(device)
max_length = (attention_masks != 0).max(0)[0].nonzero()[-1].item() + 1
if max_length < sentences_in.shape[1]:
sentences_in = sentences_in[:, :max_length]
attention_masks = attention_masks[:, :max_length]
sent_batch_size = sentences_in.shape[0]
original_sentences_split = [sent.split() for sent in original_sentences]
word_batch_size = max([len(sent) for sent in original_sentences_split])
sent_lengths = [item for item in map(len, token_start_idx)]
else:
word_batch_size = batch.sentence.shape[0]
sent_batch_size = batch.sentence.shape[1]
sentences_in = batch.sentence.permute(1, 0).to(device)
targets = batch.tags.permute(1, 0).reshape(sent_batch_size * word_batch_size).to(device)
attention_masks = None
token_start_idx = None
original_sentences_split = None
sent_lengths = val_iter.sent_lengths[batch_num - 1]
y_true += [ix_to_tag[ix.item()] for ix in targets]
words_in = get_words_in(
sentences_in=sentences_in,
char_to_ix=char_to_ix,
ix_to_word=ix_to_word,
device=device,
original_sentences_split=original_sentences_split
)
model.init_hidden(sent_batch_size=sent_batch_size, device=device)
tag_logits = model(
sentences=sentences_in,
words=words_in,
char_hidden_dim=char_hidden_dim,
sent_lengths=sent_lengths,
word_batch_size=word_batch_size,
device=device,
attention_masks=attention_masks,
token_start_idx=token_start_idx
)
eval_loss = loss_function(tag_logits, targets)
mask = targets != 1
eval_loss /= mask.float().sum()
eval_losses.append(eval_loss.item())
pred = categoriesFromOutput(tag_logits, ix_to_tag)
y_pred += pred
av_eval_losses.append(sum(eval_losses) / len(eval_losses))
y_true, y_pred = remove_pads(y_true, y_pred)
accuracy = accuracy_score(y_true, y_pred)
micro_precision, micro_recall, micro_f1, support = precision_recall_fscore_support(y_true, y_pred,
average='micro')
weighted_macro_precision, weighted_macro_recall, weighted_macro_f1, _ = precision_recall_fscore_support(y_true,
y_pred,
average='weighted')
av_eval_loss = sum(eval_losses) / len(eval_losses)
return accuracy, av_eval_loss, micro_precision, micro_recall, micro_f1, weighted_macro_precision, \
weighted_macro_recall, weighted_macro_f1
def print_results(
epoch: int,
accuracy: float64,
av_eval_loss: float,
micro_precision: float64,
micro_recall: float64,
micro_f1: float64,
weighted_macro_precision: float64,
weighted_macro_recall: float64,
weighted_macro_f1: float64,
final=False
) -> None:
if not final:
print("\nEval results at end of epoch {}".format(epoch)+":\n")
else:
print("\nBest eval results were obtained on epoch {} and are shown below:\n".format(epoch))
print("Eval accuracy: {:.2f}%".format(accuracy * 100))
print("Average Eval loss: {}".format(str(av_eval_loss)))
print("Micro Precision: {}".format(micro_precision))
print("Micro Recall: {}".format(micro_recall))
print("Micro F1: {}".format(micro_f1))
print("Weighted Macro Precision: {}".format(weighted_macro_precision))
print("Weighted Macro Recall: {}".format(weighted_macro_recall))
print("Weighted Macro F1: {}".format(weighted_macro_f1))
if __name__ == '__main__':
cmd_parser = argparse.ArgumentParser(description='command line arguments.')
cmd_parser.add_argument('--data-path', dest='data_path', type=str, nargs=1, help='the path to the folder containing the data files')
cmd_parser.add_argument('--model-path', dest='saved_model_path', type=str, nargs=1,
help='the relative path to a model you wish to resume training from')
args = cmd_parser.parse_args()
if use_bert_cased and use_bert_uncased:
raise Exception("Both use_bert_cased and use_bert_uncased are set to True in config.py!! \
Please edit the file so that at most one of these is set to true.")
if args.saved_model_path:
saved_model_path = args.saved_model_path[0]
else:
saved_model_path = None
main(args.data_path[0], saved_model_path)
|
import functools
import inspect
from types import MappingProxyType
from typing import Any, Callable, Dict, List, Optional, Tuple
def autoincrement(fn: Optional[Callable] = None, *, start: int = 1): # pragma: no cover
"""Decorate registered callables to provide them with a source of uniqueness.
Args:
fn: The callable
start: The starting number of the sequence to generate
Examples:
>>> @autoincrement
... def new(autoincrement=1):
... return autoincrement
>>> new()
1
>>> new()
2
>>> @autoincrement(start=4)
... def new(autoincrement=1):
... return autoincrement
>>> new()
4
>>> new()
5
"""
def wrapper(fn):
wrapper.initial = start
@functools.wraps(fn)
def decorator(*args, **kwargs):
result = fn(*args, autoincrement=wrapper.initial, **kwargs)
wrapper.initial += 1
return result
return decorator
if fn:
return wrapper(fn)
return wrapper
def for_model(typ):
"""Decorate a factory that returns a `Mapping` type in order to coerce it into the `typ`.
This decorator is only invoked in the context of model factory usage. The intent is that
a factory function could be more generally useful, such as to create API inputs, that
also happen to correspond to the creation of a model when invoked during a test.
Examples:
>>> class Model:
... def __init__(self, **kwargs):
... self.kw = kwargs
...
... def __repr__(self):
... return f"Model(a={self.kw['a']}, b={self.kw['b']}, c={self.kw['c']})"
>>> @for_model(Model)
... def new_model(a, b, c):
... return {'a': a, 'b': b, 'c': c}
>>> new_model(1, 2, 3)
{'a': 1, 'b': 2, 'c': 3}
>>> new_model.for_model(1, 2, 3)
Model(a=1, b=2, c=3)
"""
def wrapper(fn):
def for_model(*args, **kwargs):
result = fn(*args, **kwargs)
return typ(**result)
fn.for_model = for_model
return fn
return wrapper
class fluent:
"""Decorate a function with `fluent` to enable it to be called in a "fluent" style.
Examples:
>>> @fluent
... def foo(a, b=None, *args, c=3, **kwargs):
... print(f'(a={a}, b={b}, c={c}, args={args}, kwargs={kwargs})')
>>> foo.kwargs(much=True, surprise='wow').a(4).bind()
(a=4, b=None, c=3, args=(), kwargs={'much': True, 'surprise': 'wow'})
>>> foo.args(True, 'wow').a(5).bind()
(a=5, b=None, c=3, args=(True, 'wow'), kwargs={})
>>> partial = foo.a(1)
>>> partial.b(5).bind()
(a=1, b=5, c=3, args=(), kwargs={})
>>> partial.b(6).bind()
(a=1, b=6, c=3, args=(), kwargs={})
"""
def __init__(self, fn, signature=None, pending_args=None):
self.fn = fn
self.signature = signature or inspect.signature(fn)
self.pending_args = pending_args or {}
for parameter in self.signature.parameters.values():
if parameter.name == self.bind.__name__:
raise ValueError(
f"`fluent` reserves the name {self.bind.__name__}, please choose a different parameter name"
)
if parameter.name in self.pending_args:
continue
setattr(self, parameter.name, self.__apply(parameter))
def __apply(self, parameter):
@functools.wraps(self.fn)
def wrapper(*args, **kwargs):
signature = inspect.Signature(parameters=[parameter])
bound_args = signature.bind(*args, **kwargs)
bound_args.apply_defaults()
return self.__class__(
self.fn,
self.signature,
{**self.pending_args, parameter.name: bound_args.arguments},
)
return wrapper
def bind(
self,
*,
call_before: Optional[Callable] = None,
call_after: Optional[Callable] = None,
):
"""Finalize the call chain for a fluently called factory.
Args:
call_before: When provided, calls the given callable, supplying the args and kwargs
being sent into the factory function before actually calling it. If the `call_before`
function returns anything, the 2-tuple of (args, kwargs) will be replaced with the
ones passed into the `call_before` function.
call_after: When provided, calls the given callable, supplying the result of the factory
function call after having called it. If the `call_after` function returns anything,
the result of `call_after` will be replaced with the result of the factory function.
"""
unsupplied_args = set(self.signature.parameters) - set(self.pending_args)
for arg in unsupplied_args:
fn = getattr(self, arg)
self = fn()
args: List[Any] = []
kwargs: Dict[Any, Any] = {}
for parameter in self.signature.parameters.values():
kind_map: Dict[Any, Tuple[Callable, bool]] = {
parameter.POSITIONAL_ONLY: (args.append, True),
parameter.POSITIONAL_OR_KEYWORD: (args.append, True),
parameter.VAR_POSITIONAL: (args.extend, True),
parameter.VAR_KEYWORD: (kwargs.update, True),
parameter.KEYWORD_ONLY: (kwargs.update, False),
}
pending_arg = self.pending_args[parameter.name]
update_fn, key_on_param = kind_map[parameter.kind]
if key_on_param:
update_fn(pending_arg[parameter.name])
else:
update_fn(pending_arg)
if call_before:
call_before_result = call_before(args, MappingProxyType(kwargs))
if call_before_result:
args, kwargs = call_before_result
result = self.fn(*args, **kwargs)
if call_after:
call_after_result = call_after(result)
if call_after_result:
return call_after_result
return result
|
"""
Module for jenkinsapi Node class
"""
import json
import logging
import xml.etree.ElementTree as ET
from jenkinsapi.jenkinsbase import JenkinsBase
from jenkinsapi.custom_exceptions import PostRequired
from jenkinsapi.custom_exceptions import JenkinsAPIException
from six.moves.urllib.parse import quote as urlquote
log = logging.getLogger(__name__)
class Node(JenkinsBase):
"""
Class to hold information on nodes that are attached as slaves
to the master jenkins instance
"""
def __init__(self, jenkins_obj, baseurl, nodename, node_dict, poll=True):
"""
Init a node object by providing all relevant pointers to it
:param jenkins_obj: ref to the jenkins obj
        :param baseurl: base url for querying information on a node.
            If the url is not set, the object will construct it itself. This is
            useful when the node is being created and does not exist in Jenkins yet.
:param nodename: hostname of the node
:param dict node_dict: Dict with node parameters as described below
        :param bool poll: set to False if the node does not exist or automatic
            refresh from Jenkins is not required. Default is True.
            If the baseurl parameter is set to None, poll will be forced to False.
JNLP Node:
{
'num_executors': int,
'node_description': str,
'remote_fs': str,
'labels': str,
'exclusive': bool
}
SSH Node:
{
'num_executors': int,
'node_description': str,
'remote_fs': str,
'labels': str,
'exclusive': bool,
'host': str,
            'port': int,
'credential_description': str,
'jvm_options': str,
'java_path': str,
'prefix_start_slave_cmd': str,
            'suffix_start_slave_cmd': str,
'max_num_retries': int,
'retry_wait_time': int,
'retention': str ('Always' or 'OnDemand')
'ondemand_delay': int (only for OnDemand retention)
'ondemand_idle_delay': int (only for OnDemand retention)
'env': [
{
'key':'TEST',
'value':'VALUE'
},
{
'key':'TEST2',
'value':'value2'
}
],
'tool_location': [
{
"key": "hudson.tasks.Maven$MavenInstallation$DescriptorImpl@Maven 3.0.5",
"home": "/home/apache-maven-3.0.5/"
},
{
"key": "hudson.plugins.git.GitTool$DescriptorImpl@Default",
"home": "/home/git-3.0.5/"
},
]
}
        :return: Node obj
"""
self.name = nodename
self.jenkins = jenkins_obj
if not baseurl:
poll = False
baseurl = '%s/computer/%s' % (self.jenkins.baseurl, self.name)
JenkinsBase.__init__(self, baseurl, poll=poll)
self.node_attributes = node_dict
self._element_tree = None
self._config = None
def get_node_attributes(self):
"""
Gets node attributes as dict
Used by Nodes object when node is created
:return: Node attributes dict formatted for Jenkins API request
to create node
"""
na = self.node_attributes
if not na.get('credential_description', False):
# If credentials description is not present - we will create
# JNLP node
launcher = {'stapler-class': 'hudson.slaves.JNLPLauncher'}
else:
try:
credential = self.jenkins.credentials[
na['credential_description']
]
except KeyError:
raise JenkinsAPIException('Credential with description "%s"'
' not found'
% na['credential_description'])
retries = na['max_num_retries'] if 'max_num_retries' in na else ''
re_wait = na['retry_wait_time'] if 'retry_wait_time' in na else ''
launcher = {
'stapler-class': 'hudson.plugins.sshslaves.SSHLauncher',
'$class': 'hudson.plugins.sshslaves.SSHLauncher',
'host': na['host'],
'port': na['port'],
'credentialsId': credential.credential_id,
'jvmOptions': na['jvm_options'],
'javaPath': na['java_path'],
'prefixStartSlaveCmd': na['prefix_start_slave_cmd'],
'suffixStartSlaveCmd': na['suffix_start_slave_cmd'],
'maxNumRetries': retries,
'retryWaitTime': re_wait
}
retention = {
'stapler-class': 'hudson.slaves.RetentionStrategy$Always',
'$class': 'hudson.slaves.RetentionStrategy$Always'
}
if 'retention' in na and na['retention'].lower() == 'ondemand':
retention = {
'stapler-class': 'hudson.slaves.RetentionStrategy$Demand',
'$class': 'hudson.slaves.RetentionStrategy$Demand',
'inDemandDelay': na['ondemand_delay'],
'idleDelay': na['ondemand_idle_delay']
}
node_props = {
'stapler-class-bag': 'true'
}
if 'env' in na:
node_props.update({
'hudson-slaves-EnvironmentVariablesNodeProperty': {
'env': na['env']
}
})
if 'tool_location' in na:
node_props.update({
"hudson-tools-ToolLocationNodeProperty": {
"locations": na['tool_location']
}
})
params = {
'name': self.name,
'type': 'hudson.slaves.DumbSlave$DescriptorImpl',
'json': json.dumps({
'name': self.name,
'nodeDescription': na['node_description'],
'numExecutors': na['num_executors'],
'remoteFS': na['remote_fs'],
'labelString': na['labels'],
'mode': 'EXCLUSIVE' if na['exclusive'] else 'NORMAL',
'retentionStrategy': retention,
'type': 'hudson.slaves.DumbSlave',
'nodeProperties': node_props,
'launcher': launcher
})
}
return params
def get_jenkins_obj(self):
return self.jenkins
def __str__(self):
return self.name
def is_online(self):
return not self.poll(tree='offline')['offline']
def is_temporarily_offline(self):
return self.poll(tree='temporarilyOffline')['temporarilyOffline']
def is_jnlpagent(self):
return self._data['jnlpAgent']
def is_idle(self):
return self._data['idle']
def set_online(self):
"""
Set node online.
        Before changing the state, verify the client state: if the node is 'offline'
        but 'temporarilyOffline' is not set, the client has connection problems
        and an AssertionError is raised.
        If the node state has not changed after the call, an AssertionError is raised.
"""
self.poll()
# Before change state check if client is connected
if self._data['offline'] and not self._data['temporarilyOffline']:
raise AssertionError("Node is offline and not marked as "
"temporarilyOffline, check client "
"connection: offline = %s, "
"temporarilyOffline = %s" %
(self._data['offline'],
self._data['temporarilyOffline']))
elif self._data['offline'] and self._data['temporarilyOffline']:
self.toggle_temporarily_offline()
if self._data['offline']:
raise AssertionError("The node state is still offline, "
"check client connection:"
" offline = %s, "
"temporarilyOffline = %s" %
(self._data['offline'],
self._data['temporarilyOffline']))
def set_offline(self, message="requested from jenkinsapi"):
"""
Set node offline.
        If the node state has not changed after the call, an AssertionError is raised.
        :param message: optional string explaining why you are taking this
            node offline
"""
if not self._data['offline']:
self.toggle_temporarily_offline(message)
data = self.poll(tree='offline,temporarilyOffline')
if not data['offline']:
raise AssertionError("The node state is still online:" +
"offline = %s , temporarilyOffline = %s" %
(data['offline'],
data['temporarilyOffline']))
def toggle_temporarily_offline(self, message="requested from jenkinsapi"):
"""
        Switches the state of a connected node (online/offline) and
        sets the 'temporarilyOffline' property (True/False).
        Calling the same method again restores the previous node status.
:param message: optional string can be used to explain why you
are taking this node offline
"""
initial_state = self.is_temporarily_offline()
url = self.baseurl + \
"/toggleOffline?offlineMessage=" + urlquote(message)
try:
html_result = self.jenkins.requester.get_and_confirm_status(url)
except PostRequired:
html_result = self.jenkins.requester.post_and_confirm_status(
url,
data={})
self.poll()
log.debug(html_result)
state = self.is_temporarily_offline()
if initial_state == state:
raise AssertionError(
"The node state has not changed: temporarilyOffline = %s" %
state)
def update_offline_reason(self, reason):
"""
        Update the offline reason on a temporarily offline node
"""
if self.is_temporarily_offline():
url = self.baseurl + '/changeOfflineCause?offlineMessage=' + urlquote(reason)
self.jenkins.requester.post_and_confirm_status(url, data={})
def offline_reason(self):
return self._data['offlineCauseReason']
@property
def _et(self):
return self._get_config_element_tree()
def _get_config_element_tree(self):
"""
Returns an xml element tree for the node's config.xml. The
resulting tree is cached for quick lookup.
"""
if self._config is None:
self.load_config()
if self._element_tree is None:
self._element_tree = ET.fromstring(self._config)
return self._element_tree
def get_config(self):
"""
Returns the config.xml from the node.
"""
response = self.jenkins.requester.get_and_confirm_status(
"%(baseurl)s/config.xml" % self.__dict__)
return response.text
def load_config(self):
"""
Loads the config.xml for the node allowing it to be re-queried
without generating new requests.
"""
if self.name == 'master':
raise JenkinsAPIException('master node does not have config.xml')
self._config = self.get_config()
self._get_config_element_tree()
def upload_config(self, config_xml):
"""
Uploads config_xml to the config.xml for the node.
"""
if self.name == 'master':
raise JenkinsAPIException('master node does not have config.xml')
self.jenkins.requester.post_and_confirm_status(
"%(baseurl)s/config.xml" % self.__dict__,
data=config_xml)
def get_labels(self):
"""
Returns the labels for a slave as a string with each label
separated by the ' ' character.
"""
return self.get_config_element('label')
def get_num_executors(self):
try:
return self.get_config_element('numExecutors')
except JenkinsAPIException:
return self._data['numExecutors']
def set_num_executors(self, value):
"""
Sets number of executors for node
Warning! Setting number of executors on master node will erase all
other settings
"""
set_value = value if isinstance(value, str) else str(value)
if self.name == 'master':
# master node doesn't have config.xml, so we're going to submit
# form here
data = 'json=%s' % urlquote(
json.dumps({
"numExecutors": set_value,
"nodeProperties": {
"stapler-class-bag": "true"
}
})
)
url = self.baseurl + '/configSubmit'
self.jenkins.requester.post_and_confirm_status(url, data=data)
else:
self.set_config_element('numExecutors', set_value)
self.poll()
def get_config_element(self, el_name):
"""
        Returns a simple config element.
        Should not be used to retrieve complex elements such as
        "nodeProperties" or "launcher".
"""
return self._et.find(el_name).text
def set_config_element(self, el_name, value):
"""
Sets simple config element
"""
self._et.find(el_name).text = value
xml_str = ET.tostring(self._et)
self.upload_config(xml_str)
def get_monitor(self, monitor_name, poll_monitor=True):
"""
Polls the node returning one of the monitors in the monitorData branch of the
returned node api tree.
"""
monitor_data_key = 'monitorData'
if poll_monitor:
# polling as monitors like response time can be updated
monitor_data = self.poll(tree=monitor_data_key)[monitor_data_key]
else:
monitor_data = self._data[monitor_data_key]
full_monitor_name = 'hudson.node_monitors.{0}'.format(monitor_name)
if full_monitor_name not in monitor_data:
raise AssertionError('Node monitor %s not found' % monitor_name)
return monitor_data[full_monitor_name]
def get_available_physical_memory(self):
"""
Returns the node's available physical memory in bytes.
"""
monitor_data = self.get_monitor('SwapSpaceMonitor')
return monitor_data['availablePhysicalMemory']
def get_available_swap_space(self):
"""
Returns the node's available swap space in bytes.
"""
monitor_data = self.get_monitor('SwapSpaceMonitor')
return monitor_data['availableSwapSpace']
def get_total_physical_memory(self):
"""
Returns the node's total physical memory in bytes.
"""
monitor_data = self.get_monitor('SwapSpaceMonitor')
return monitor_data['totalPhysicalMemory']
def get_total_swap_space(self):
"""
Returns the node's total swap space in bytes.
"""
monitor_data = self.get_monitor('SwapSpaceMonitor')
return monitor_data['totalSwapSpace']
def get_workspace_path(self):
"""
Returns the local path to the node's Jenkins workspace directory.
"""
monitor_data = self.get_monitor('DiskSpaceMonitor')
return monitor_data['path']
def get_workspace_size(self):
"""
Returns the size in bytes of the node's Jenkins workspace directory.
"""
monitor_data = self.get_monitor('DiskSpaceMonitor')
return monitor_data['size']
def get_temp_path(self):
"""
Returns the local path to the node's temp directory.
"""
monitor_data = self.get_monitor('TemporarySpaceMonitor')
return monitor_data['path']
def get_temp_size(self):
"""
Returns the size in bytes of the node's temp directory.
"""
monitor_data = self.get_monitor('TemporarySpaceMonitor')
return monitor_data['size']
def get_architecture(self):
"""
        Returns the system architecture of the node, e.g. "Linux (amd64)".
"""
# no need to poll as the architecture will never change
return self.get_monitor('ArchitectureMonitor', poll_monitor=False)
def get_response_time(self):
"""
Returns the node's average response time.
"""
monitor_data = self.get_monitor('ResponseTimeMonitor')
return monitor_data['average']
def get_clock_difference(self):
"""
Returns the difference between the node's clock and the master Jenkins clock.
Used to detect out of sync clocks.
"""
monitor_data = self.get_monitor('ClockMonitor')
return monitor_data['diff']
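# --- Illustrative usage sketch (not part of the original module) ---
# A minimal demo of the JNLP-style ``node_dict`` documented in ``Node.__init__`` above.
# The stub object stands in for a real ``jenkinsapi.jenkins.Jenkins`` instance and only
# provides the ``baseurl`` attribute the constructor needs when ``baseurl`` is None;
# with ``poll=False`` no request should be sent to Jenkins.
if __name__ == '__main__':
    class _StubJenkins(object):
        baseurl = 'http://localhost:8080'
    jnlp_node_dict = {
        'num_executors': 2,
        'node_description': 'example JNLP agent',
        'remote_fs': '/var/jenkins',
        'labels': 'linux docker',
        'exclusive': False,
    }
    example_node = Node(_StubJenkins(), None, 'example-agent', jnlp_node_dict, poll=False)
    # Prints the form parameters that would be posted to Jenkins to create the node.
    print(example_node.get_node_attributes()['json'])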
|
# -*- coding: utf-8 -*-
"""
Part of the voitools package
Copyright 2015 Board of Regents of the University of Wisconsin System
Licensed under the MIT license; see LICENSE at the root of the package.
Authored by Nate Vack <njvack@wisc.edu> at the Waisman Laboratory for Brain
Imaging and Behavior.
This contains tests for the voi_info script
"""
from __future__ import (
print_function,
unicode_literals,
division,
absolute_import)
from voitools.scripts import voi_info
import pytest
def test_voitools_runs(capsys):
with pytest.raises(SystemExit):
voi_info.main(['--help'])
out, err = capsys.readouterr()
assert len(out) > 0
assert len(err) == 0
def test_voitools_reads_long_file(long_data_filename, capsys):
voi_info.main([long_data_filename])
out, err = capsys.readouterr()
assert len(out) > 0
assert len(err) == 0
def test_voitools_reads_triple_file(triple_data_filename, capsys):
voi_info.main([triple_data_filename])
out, err = capsys.readouterr()
assert len(out) > 0
assert len(err) == 0
|
# Generated by Django 3.1.6 on 2021-02-07 22:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('data', '0007_auto_20201028_1405'),
]
operations = [
migrations.AddField(
model_name='resizedredirect',
name='name',
field=models.CharField(blank=True, help_text='User friendly name', max_length=64, null=True),
),
migrations.AddField(
model_name='resizedredirect',
name='sitemap',
field=models.BooleanField(default=False, verbose_name='Include in Sitemap'),
),
]
|
"""Defines a time simulation responsible for executing any registered
producers
"""
import datetime
import time
from enum import IntEnum
import logging
import logging.config
from pathlib import Path
import pandas as pd
# Import logging before models to ensure configuration is picked up
logging.config.fileConfig(f"{Path(__file__).parents[0]}/logging.ini")
from connector import configure_connector
from models import Line
from models import Weather
logger = logging.getLogger(__name__)
class TimeSimulation:
weekdays = IntEnum("weekdays", "mon tue wed thu fri sat sun", start=0)
ten_min_frequency = datetime.timedelta(minutes=10)
def __init__(self, sleep_seconds=5, time_step=None, schedule=None):
"""Initializes the time simulation"""
self.sleep_seconds = sleep_seconds
self.time_step = time_step
if self.time_step is None:
self.time_step = datetime.timedelta(minutes=self.sleep_seconds)
# Read data from disk
self.raw_df = pd.read_csv(
f"{Path(__file__).parents[0]}/data/cta_stations.csv"
).sort_values("order")
# Define the train schedule (same for all trains)
self.schedule = schedule
if schedule is None:
self.schedule = {
TimeSimulation.weekdays.mon: {0: TimeSimulation.ten_min_frequency},
TimeSimulation.weekdays.tue: {0: TimeSimulation.ten_min_frequency},
TimeSimulation.weekdays.wed: {0: TimeSimulation.ten_min_frequency},
TimeSimulation.weekdays.thu: {0: TimeSimulation.ten_min_frequency},
TimeSimulation.weekdays.fri: {0: TimeSimulation.ten_min_frequency},
TimeSimulation.weekdays.sat: {0: TimeSimulation.ten_min_frequency},
TimeSimulation.weekdays.sun: {0: TimeSimulation.ten_min_frequency},
}
self.train_lines = [
Line(Line.colors.blue, self.raw_df[self.raw_df["blue"]]),
Line(Line.colors.red, self.raw_df[self.raw_df["red"]]),
Line(Line.colors.green, self.raw_df[self.raw_df["green"]]),
]
def run(self):
curr_time = datetime.datetime.utcnow().replace(
hour=0, minute=0, second=0, microsecond=0
)
logger.info("Beginning simulation, press Ctrl+C to exit at any time")
logger.info("loading kafka connect jdbc source connector")
configure_connector()
logger.info("beginning cta train simulation")
weather = Weather(curr_time.month)
try:
while True:
logger.debug("simulation running: %s", curr_time.isoformat())
# Send weather on the top of the hour
if curr_time.minute == 0:
weather.run(curr_time.month)
_ = [line.run(curr_time, self.time_step) for line in self.train_lines]
curr_time = curr_time + self.time_step
time.sleep(self.sleep_seconds)
except KeyboardInterrupt as e:
logger.info("Shutting down")
_ = [line.close() for line in self.train_lines]
if __name__ == "__main__":
TimeSimulation().run()
|
from __future__ import print_function
try:
basestring
except NameError:
basestring = str
try:
# noinspection PyShadowingBuiltins
input = raw_input
except NameError:
pass
import getpass
import json
import os
import shutil
import subprocess
import sys
import tempfile
from abc import ABCMeta
from collections import OrderedDict
from os.path import dirname, exists, expanduser, expandvars
from distutils.spawn import find_executable
HOME = expanduser("~")
class PackageManager(object):
__metaclass__ = ABCMeta
@classmethod
def basics(cls):
return {name: pm for name, pm in cls.all().items() if pm.installer is None}
@classmethod
def manual(cls):
return cls.get("manual")
__all = {}
@classmethod
def all(cls):
if not cls.__all:
cls.__all = {
"apk": Apk(),
"apt": Apt(),
"pacman": Pacman(),
"yum": Yum(),
"snap": Snap(),
"flatpak": Flatpak(),
"manual": Manual(),
}
return cls.__all
@classmethod
def get(cls, item):
return cls.all()[item]
def __init__(self, name, install_template, installer=None):
self.name = name
self.install_template = install_template
self.installer = installer
def init(self, cli):
return cli.is_installed(self.name)
def install_cmd(self, packages):
return self.install_template.format(package=packages)
class Apk(PackageManager):
def __init__(self):
super(Apk, self).__init__("apk", "apk add {package}")
class Apt(PackageManager):
def __init__(self):
super(Apt, self).__init__("apt", "apt -y install {package}")
def init(self, cli):
return cli.is_installed("apt") and cli.run_as_root("apt update")
class Yum(PackageManager):
def __init__(self):
super(Yum, self).__init__("yum", "yum install -y {package}")
class Pacman(PackageManager):
def __init__(self):
super(Pacman, self).__init__("pacman", "pacman --noconfirm -S {package}")
def init(self, cli):
return cli.is_installed("pacman") and cli.run_as_root("pacman --noconfirm -Syu")
class Snap(PackageManager):
def __init__(self):
super(Snap, self).__init__("snap", "snap install {package}", installer=Software("snap", all="snapd"))
class Flatpak(PackageManager):
def __init__(self):
super(Flatpak, self).__init__(name="flatpak",
install_template="flatpak install {package}",
installer=Software("flatpak", pacman="flatpak", yum="flatpak"))
class Manual(PackageManager):
def __init__(self):
super(Manual, self).__init__(name="manual",
install_template="{package}",
installer="manual installation")
def init(self, cli):
return True
class Software:
def __init__(self, name, description=None, requirements=None, as_sudo=True, flavors=None, check_available=None, **kwargs):
packages = {}
# try to get the "default" package name (all)
if "default" in kwargs:
default = kwargs["default"]
for man_name in PackageManager.basics():
# add package for all default package managers
packages[man_name] = default
del kwargs["default"]
# package manager specific package names
for key, value in kwargs.items():
try:
if value:
packages[key] = value
elif key in packages:
# explicitly set to None / empty package: remove install possibility for this manager
del packages[key]
except KeyError:
print("Unknown package manager {} ..".format(key), file=sys.stderr)
                packages.pop(key, None)
self.name = name
self.description = description if description else name
self.as_sudo = as_sudo
self.flavors = flavors if flavors else {}
        # in case there are flavor choices we have to ask the user for a selection before package names can be resolved
self.flavors_selected = not self.flavors
self.__unflavored_packages = packages
self.__requirements = requirements
self.__check_available = check_available
def packages(self, package_manager):
assert self.flavors_selected, "Error, flavors not yet selected!"
unflavored_package_def = self.__unflavored_packages[package_manager]
if isinstance(unflavored_package_def, basestring):
return self.__add_flavors(unflavored_package_def)
return [self.__add_flavors(pack) if isinstance(pack, basestring) else pack for pack in unflavored_package_def]
@property
def installers(self):
return [pman for pman in self.__unflavored_packages.keys()]
def choose_flavors(self, choices):
assert self.flavors.keys() == choices.keys(),\
"Flavor selection `%s` differences from available options `%s`" % (list(choices), list(self.flavors))
self.flavors_selected = True
self.flavors = choices
def install_via(self, cli, package_manager):
execute = cli.run
if self.as_sudo:
execute = cli.run_as_root
def flavored_packages(definition):
return package_manager.install_cmd(self.__add_flavors(definition))
return cli.run_all(
cmds=self.packages(package_manager.name),
default_action=execute,
value_wrapper=flavored_packages,
log=True,
)
def check_requirements(self, cli):
if self.__requirements is None:
return True
cli.info("Checking requirements for {name} ...".format(name=self.name))
return cli.run_all(
cmds=self.__requirements,
default_action=cli.is_installed,
value_wrapper=self.__add_flavors,
log=True,
)
def is_available(self, cli):
checks = self.__check_available
if checks is None:
checks = self.name
return cli.run_all(
cmds=checks,
default_action=cli.is_installed,
value_wrapper=self.__add_flavors,
)
def __add_flavors(self, unflavored):
return expandvars(unflavored).format(**self.flavors)
@classmethod
def parse_list(cls, file):
with open(file, mode="r") as fp:
# it would be much nicer if we could use yaml instead of json, however, python 2.7 does not support yaml out
# of the box
all_definitions = json.load(fp, object_pairs_hook=OrderedDict)
return [cls(name=name, **params) for name, params in all_definitions.items()]
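# Illustrative sketch (made-up entries) of a definitions file consumed by Software.parse_list.
# Each top-level key is a software name and its value holds keyword arguments for
# Software.__init__, e.g. per-package-manager package names or a "default" used for all
# basic package managers:
#   {
#       "git": {"default": "git", "description": "Git version control"},
#       "htop": {"apt": "htop", "pacman": "htop", "yum": "htop"},
#       "spotify": {"snap": "spotify", "flatpak": "com.spotify.Client"}
#   }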
# TODO
# check which package managers are available (apt, yum, ...)
# update repositories
# go through all software packages and ask the user which to install; the user has the following options:
#   skip: do neither check nor install
#   normal: check before executing the install commands and again afterwards (if the after-check fails, give a warning/error)
#   force: do not check before executing the install commands, but check afterwards (if the check fails, give a warning/error)
# try to find an existing & compatible installer: go through all installers; if init() fails, check the next one and
#   try to identify an installed additional_installer
# run the installer:
#   1. if single package_def (str): exec.run_as_root(PM.install_template.format(package=package_def))
#   2a. if instruction set (list of str): run the individual commands (run_as_root(cmd1), run_as_root(cmd2), ...) or ...
#   2b. if it is a callable: execute the callable
class ColoredConsole:
BLUE = "\033[94m"
GREEN = "\033[92m"
ORANGE = "\033[93m"
GRAY = "\033[37m"
RED = "\033[91m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
ENDC = "\033[0m"
FRAME_HORIZONTAL = u"\u2550"
FRAME_VERTICAL = u"\u2551"
FRAME_LEFT_TOP = u"\u2554"
FRAME_RIGHT_TOP = u"\u2557"
FRAME_LEFT_BOTTOM = u"\u255A"
FRAME_RIGHT_BOTTOM = u"\u255D"
FRAME_SEP = u"\u2500"
FRAME_LEFT_SEP = u"\u255F"
FRAME_RIGHT_SEP = u"\u2562"
def __init__(self):
self.sudo_file = None
self.available_installers = {name: base_man.init(self) for name, base_man in PackageManager.basics().items()}
self.available_installers["manual"] = True
for name, man in PackageManager.all().items():
if name not in self.available_installers and man.init(self) is True:
# just check for the "non-basic" installers (e.g., snap) .. if not available, we will see later if we
# should ask for installation
self.available_installers[name] = True
def __del__(self):
self.unset_pw()
def print(self, msg, colors):
print(colors + msg + self.ENDC)
def headline(self, msg, list_installers=False):
content_size = max(78, len(msg)+2)
def fill(filler=" ", text="", center=False):
left = 1
if center:
left = max(int((content_size - len(text)) / 2.), 1)
right = content_size - left - len(text)
return (filler * left) + text + (filler * right)
        # draw frame
self.info(self.FRAME_LEFT_TOP + fill(filler=self.FRAME_HORIZONTAL) + self.FRAME_RIGHT_TOP)
self.info(self.FRAME_VERTICAL + self.BOLD + fill(text=msg) + self.FRAME_VERTICAL)
if list_installers:
self.info(
self.FRAME_LEFT_SEP +
fill(text=" Available Package Managers ", filler=self.FRAME_SEP, center=True) +
self.FRAME_RIGHT_SEP
)
available = sorted(installer for installer, available in self.available_installers.items() if available)
for installer in available:
self.info(self.FRAME_VERTICAL + fill(text=installer) + self.FRAME_VERTICAL)
self.info(self.FRAME_LEFT_BOTTOM + fill(filler=self.FRAME_HORIZONTAL) + self.FRAME_RIGHT_BOTTOM)
def error(self, msg):
self.print(msg, self.RED + self.BOLD)
def success(self, msg):
self.print(msg, self.GREEN)
def info(self, msg):
self.print(msg, self.BLUE)
def debug(self, msg):
self.print(msg, self.GRAY)
def ask(self, question, options=None):
"""
Ask a question with the given options.
        If options is an empty array, no choices are checked (and no default is set).
:param question: The question to ask.
:param options: The possible choices for the user as an array of strings (default: [*y*/n]). The first value
between a pair of two asterisks will be used as default value (leading and trailing double-asterisks will be
removed).
:return: The "answer" return value, e.g., is "y" for "yes or "n" for "no".
"""
if options is None:
options = ["**y**", "n"]
def is_default(value):
return value.startswith("**") and value.endswith("**")
def strip_default(value, tty_highlight=False):
if not is_default(value):
return value
if tty_highlight:
return "{}{}{}".format(self.BOLD + self.UNDERLINE, value[2:-2].upper(), self.ENDC + self.ORANGE)
return value[2:-2]
default = None
prompt = ""
if len(options) == 1:
default = options[0]
prompt = " [default: " + default + "]"
elif len(options) > 1:
for val in options:
if is_default(val):
default = strip_default(val)
break
prompt = " [" + "/".join(strip_default(val, True) for val in options) + "]"
if default is not None:
# remove double asterisks
options = [strip_default(val) for val in options]
# normalize options for comparisons
options_lower = {option.lower(): option for option in options}
while True:
choice = input(self.ORANGE + question + prompt + self.ENDC + " >>> ").strip()
if len(options) == 0:
return choice
elif choice == "" and default is not None:
return default
elif choice.lower() in options_lower:
return options_lower[choice.lower()]
else:
self.error("Please respond with either of these choices" + prompt)
def set_pw(self):
name = None
try:
fd, name = tempfile.mkstemp(text=True)
self.debug("Created temp-file {} ...".format(name))
os.write(fd, "#!/bin/sh\necho '{}'".format(
getpass.getpass(self.ORANGE + "Please enter the password to use sudo:" + self.ENDC + " >>> ")
).encode())
os.fchmod(fd, 0o700)
os.close(fd)
self.unset_pw()
except Exception:
if name:
                # `name` refers to a temp *file*, not a directory, so remove it directly
                try:
                    os.remove(name)
                except OSError:
                    pass
raise
self.sudo_file = name
def unset_pw(self):
if self.sudo_file:
self.debug("Deleting temp-file {} ...".format(self.sudo_file))
os.remove(self.sudo_file)
self.sudo_file = None
@staticmethod
def is_root():
return getpass.getuser() == "root"
def init_sudo(self):
if not self.sudo_file:
self.set_pw()
os.environ['SUDO_ASKPASS'] = self.sudo_file
def run_as_root(self, cmd, silent=False):
if self.is_root():
# run command directly
return self.run(cmd, silent=silent)
else:
# run command with sudo
self.init_sudo()
return self.run(["/usr/bin/sudo", "-A"] + cmd.split(), silent=silent)
def run(self, cmd, silent=False):
if isinstance(cmd, basestring):
cmd = cmd.split()
try:
if silent:
devnull = open(os.devnull, 'w')
return subprocess.call(cmd, stdout=devnull, stderr=devnull) == 0
else:
return subprocess.call(cmd) == 0
except Exception:
return False
def write_to_file(self, content, path, mode="w"):
parent = dirname(path)
try:
os.makedirs(parent, exist_ok=True)
except TypeError:
# exist_ok only available starting with 3.2 .. try without
try:
os.makedirs(parent)
except OSError:
# expect: [Errno 17] File exists: '{parent}'
pass
try:
with open(path, mode=mode) as fd:
fd.write(content)
except Exception:
self.error("Unable to {} content to `{}`!".format("append" if mode == "a" else "write", path))
return False
return True
@staticmethod
def is_installed(command):
return find_executable(command) is not None
@staticmethod
def file_exists(filename):
return exists(filename)
def run_all(self, cmds, default_action, value_wrapper=lambda x: x, log=False):
log = self.debug if log else lambda x: None
if isinstance(cmds, basestring):
return default_action(value_wrapper(cmds))
if isinstance(cmds, list):
# here we go recursive
success = True
for item in cmds:
if not success:
log(" --> Skipping step (due to previous problem): {}".format(item))
else:
log(" Executing: {}".format(item))
success &= self.run_all(item, default_action, value_wrapper)
if not success:
log(" --> Step execution failed: {}".format(item))
return success
if isinstance(cmds, dict):
# here we have a dynamic definition of a check:
if cmds["type"] == "find_executable":
return self.is_installed(value_wrapper(cmds["cmd"]))
if cmds["type"] == "file_exists":
filenames = cmds["file"]
if isinstance(filenames, list):
# multiple file combination: OR
return any(self.file_exists(value_wrapper(file)) for file in filenames)
return self.file_exists(value_wrapper(filenames))
if cmds["type"] == "write_to_file":
return self.write_to_file(**{key: value_wrapper(val) for key, val in cmds.items() if key != "type"})
if cmds["type"] == "lambda":
return eval(value_wrapper(cmds["code"]))(self)
raise ValueError("Unknown check definition: {}".format(cmds))
def try_install(self, software, pman):
self.info(" Trying to install {} via {} ...".format(software.description, pman.name))
# here we are lets try to execute the installer
success = software.install_via(cli=self, package_manager=pman)
if success:
if software.is_available(self):
self.success(" Installation of {} via {} successful!".format(software.name, pman.name))
else:
self.debug(" Steps executed successfully but not able to verify installation of {} via {}!".format(
software.name, pman.name
))
return success
def choose_installers(self, software_packages):
for software in software_packages:
if not software.is_available(self) and \
software.check_requirements(self) and \
"y" == self.ask("Do you want to install {}?".format(software.description)).lower():
if not software.flavors_selected:
flavor_choices = {}
for flavor_type, choices in software.flavors.items():
if isinstance(choices, basestring):
choices = [choices]
flavor_choices[flavor_type] = self.ask(
" Please choose a flavor for `{}`".format(flavor_type), options=choices
)
software.choose_flavors(flavor_choices)
tried_pms = []
for name in software.installers:
pman = PackageManager.get(name)
if name not in self.available_installers:
# we didn't try using this pm yet .. try to initialize and cache result for next time
pm_is_initialized = pman.init(self)
if isinstance(pm_is_initialized, Software):
# this is a special case .. we get a software package back, so we have to try install it
# before checking if it is correctly installed
for pm_installer in pm_is_initialized.installers:
base_pm = PackageManager.get(pm_installer)
if self.try_install(pman=base_pm, software=software):
pm_is_initialized = pman.init(self)
break
self.available_installers[pman.name] = pm_is_initialized
if not self.available_installers[pman.name]:
continue
tried_pms.append(pman.name)
if self.try_install(pman=pman, software=software):
break
if not software.is_available(self):
if tried_pms:
self.error(" Unable to install `{}`, tried: {}".format(
software.name,
", ".join(tried_pms)
))
else:
self.error(" Unable to install `{}`, no suitable package manager found!".format(software.name))
def start(*package_lists):
cli = ColoredConsole()
if package_lists:
for idx, package_list in enumerate(package_lists):
cli.headline("Installing software from {} ...".format(package_list), list_installers=idx==0)
cli.choose_installers(Software.parse_list(package_list))
else:
cli.headline("Basic software packages ...", list_installers=True)
cli.choose_installers(Software.parse_list("install/basics.json"))
cli.headline("Desktop software packages ...")
cli.choose_installers(Software.parse_list("install/desktop.json"))
cli.headline("Development software packages ...")
cli.choose_installers(Software.parse_list("install/development.json"))
if __name__ == '__main__':
start(*sys.argv[1:])
|
from sqlalchemy import Table, Column, ForeignKey, String, Integer, Float
from sqlalchemy.orm import relationship
from database import Base
boardgame_categories = Table('boardgame_categories', Base.metadata,
Column('id', Integer, nullable=False, primary_key=True),
Column('boardgame_id', Integer, ForeignKey('boardgames.id')),
Column('category_id', Integer, ForeignKey('categories.id')))
class Categories(Base):
"""Boardgame category database model."""
__tablename__ = 'categories'
id = Column(Integer, nullable=False, primary_key=True)
name = Column(String)
boardgames = relationship('Boardgames', secondary=boardgame_categories, back_populates='categories')
class Boardgames(Base):
"""Boardgame database model."""
__tablename__ = 'boardgames'
id = Column(Integer, nullable=False, primary_key=True)
name = Column(String)
year = Column(Integer)
score = Column(Float)
complexity = Column(Float)
brief = Column(String)
description = Column(String)
categories = relationship('Categories', secondary=boardgame_categories, back_populates='boardgames')
|
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, roc_auc_score
def objective_function(X, y, weights: list) -> tuple:
    """Objective vector (AUC and sensitivity) to maximize; both values are returned
    negated so the tuple can be fed directly to a minimizer (usage sketch below)."""
LR = LogisticRegression(solver='liblinear', max_iter=200, tol=1e-7)
RF = RandomForestClassifier(n_estimators=100, max_depth=4, min_samples_split=0.03,
min_samples_leaf=0.05)
estimators = [('lr', LR), ('rf', RF)]
clf = VotingClassifier(estimators=estimators, voting='soft', weights=weights)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
train_size=0.7, random_state=1, stratify=y)
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    _, _, fn, tp = confusion_matrix(y_test, y_pred).ravel()
    sensitivity = tp / (tp + fn)
    y_pred = clf.predict_proba(X_test)[:, 1]
    auc = roc_auc_score(y_test, y_pred)
return -auc, -sensitivity
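# --- Illustrative usage sketch (not part of the original pipeline) ---
# Evaluate the objective vector on a synthetic dataset; ``make_classification`` and the
# example weights below are placeholders for whatever data and weighting scheme is used.
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=500, n_features=10, random_state=1)
    neg_auc, neg_sensitivity = objective_function(X, y, weights=[1, 2])
    # Values come back negated so that a minimizer effectively maximizes them.
    print("AUC:", -neg_auc, "sensitivity:", -neg_sensitivity)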
|
import secrets
class Authenticator:
def __init__(self, api_token: str) -> None:
self.api_token = api_token
def authenticate(self, user_token: str) -> bool:
return secrets.compare_digest(user_token, self.api_token)
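# --- Illustrative usage sketch (not part of the original module) ---
# The token values below are placeholders. secrets.compare_digest performs a constant-time
# comparison, which avoids leaking information about the expected token through timing.
if __name__ == "__main__":
    auth = Authenticator(api_token="expected-token")
    print(auth.authenticate("expected-token"))  # True
    print(auth.authenticate("wrong-token"))     # False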
|
# Generated by Django 2.2.1 on 2019-06-11 19:13
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('jobs', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='job',
name='order_item',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='job', to='orders.OrderItem'),
),
]
|
# -*- coding: utf-8 -*-
# cython: language_level=3
# BSD 3-Clause License
#
# Copyright (c) 2020-2021, Faster Speeding
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A collection of utility functions and classes designed to enhances Hikari."""
from __future__ import annotations
__all__: typing.Sequence[str] = [
# /__init__.py
"__author__",
"__ci__",
"__copyright__",
"__docs__",
"__email__",
"__issue_tracker__",
"__license__",
"__url__",
"__version__",
# /asgi.py
"asgi",
"AsgiAdapter",
# /backoff.py
"backoff",
"Backoff",
"ErrorManager",
# /components.py
"components",
"AbstractComponentExecutor",
"ActionRowExecutor",
"as_child_executor",
"as_component_callback",
"ChildActionRowExecutor",
"ComponentClient",
"ComponentContext",
"ComponentExecutor",
"ComponentPaginator",
"InteractiveButtonBuilder",
"MultiComponentExecutor",
"SelectMenuBuilder",
"WaitForComponent",
# /reactions.py
"reactions",
"as_reaction_callback",
"AbstractReactionHandler",
"ReactionHandler",
"ReactionPaginator",
"ReactionClient",
# /pagination.py
"pagination",
"async_paginate_string",
"sync_paginate_string",
"paginate_string",
]
import typing
from .asgi import *
from .backoff import *
from .components import *
from .pagination import *
from .reactions import *
__author__ = "Faster Speeding"
__ci__ = "https://github.com/FasterSpeeding/Yuyo/actions"
__copyright__ = "© 2020 Faster Speeding"
__coverage__ = "https://codeclimate.com/github/FasterSpeeding/Yuyo"
__docs__ = "https://yuyo.cursed.solutions/"
__email__ = "lucina@lmbyrne.dev"
__issue_tracker__ = "https://github.com/FasterSpeeding/Yuyo/issues"
__license__ = "BSD"
__url__ = "https://github.com/FasterSpeeding/Yuyo"
__version__ = "1.0.4a1"
|
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Independent Variables
fail_rate = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, .00001, .00001, .00001, .001, .001, .001, .01, .01, .01, .01, .01, .01, .01, .01, .01]
join_leave_rate = [.0001, .0001, .0001, .0001, .0001, .001, .001, .001, .01, .01, .0001, .0001, .0001, .0001, .0001, .0001, .0001, .0001, .0001, .001, .001, .001, .01, .01, .01]
client_rate = [3, 2, 1, .5, .1, 1, .5, .1, .5, .1, 1, .1, .05, 1, .1, .05, 1, .5, .1, 1, .5, .1, 1, .5, .1]
# Dependent Variables
insert_hops = [12, 12, 12, 12, 12, 11, 8, 10, 10, 9, 12, 4, 12, 12, 10, 12, 7, 9, 5, 8, 9, 7, 9, 7, 5]
insert_recvd = [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 8, 10, 10, 9, 9, 10, 9, 8, 10, 10, 6, 10, 7]
get_hops = [12, 12, 12, 12, 12, 11, 9, 10, 11, 10, 12, 4, 12, 12, 10, 12, 8, 8, 5, 9, 9, 7, 9, 7, 5]
get_recvd = [10, 10, 10, 10, 10, 10, 10, 10, 10, 9, 10, 9, 8, 10, 10, 9, 5, 8, 9, 6, 10, 10, 5, 6, 7]
num_nodes = [10, 20, 30, 40, 50, 60]
stabilization_time = [6.36, 11.52, 16.33, 25.07, 38.46, 51.01]
stabilization_time_fingers = [6.946756, 14.029971, 26.244583, 37.977037, 46.021314, 0]
if __name__ == "__main__":
'''
# setup the figure and axes
fig = plt.figure(figsize=(8, 3))
ax1 = fig.add_subplot(121, projection='3d')
# fake data
_x = np.arange(4)
_y = np.arange(5)
_xx, _yy = np.meshgrid(_x, _y)
x, y = _xx.ravel(), _yy.ravel()
top = x + y
x = join_leave_rate
y = client_rate
top = get_hops
bottom = np.zeros_like(top)
width = depth = 1
ax1.bar3d(x, y, bottom, width, depth, top, shade=True)
plt.show()
'''
'''
fail_rate_disp = np.log10(.00001 + np.array(fail_rate))
join_leave_disp = np.log10(join_leave_rate)
get_success = np.array(get_recvd) / 10
plt.scatter(client_rate[10:], get_success[10:])
plt.title("")
plt.xlabel("Log Node Failure Rate")
plt.ylabel("Avg Hops/Req")
plt.show()
'''
plt.scatter(num_nodes, stabilization_time_fingers)
plt.title("Stabilization Time (w/ Finger Tables)")
plt.xlabel("Number of Nodes")
plt.ylabel("Time to Stabilize")
plt.show()
|
#!/usr/bin/env python
# example helloworld.py
import subprocess
import pygtk
pygtk.require('2.0')
import gtk
class TextEntry(gtk.Entry):
def __init__(self, window):
gtk.Entry.__init__(self)
self.keyboard = window.keyboard
self.connect("focus-in-event", self.on_focus_in)
self.connect("focus-out-event", self.on_focus_out)
def on_focus_in(self, event, data):
self.keyboard.show()
def on_focus_out(self, event, data):
self.keyboard.hide()
class HelloWorld:
# This is a callback function. The data arguments are ignored
# in this example. More on callbacks below.
def hello(self, widget, data=None):
print "Hello World"
def delete_event(self, widget, event, data=None):
# If you return FALSE in the "delete_event" signal handler,
# GTK will emit the "destroy" signal. Returning TRUE means
# you don't want the window to be destroyed.
# This is useful for popping up 'are you sure you want to quit?'
# type dialogs.
print "delete event occurred"
# Change FALSE to TRUE and the main window will not be destroyed
# with a "delete_event".
return False
def destroy(self, widget, data=None):
print "destroy signal occurred"
gtk.main_quit()
def __init__(self):
# create a new window
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window.connect("delete_event", self.delete_event)
self.window.connect("destroy", self.destroy)
self.window.set_border_width(10)
p = subprocess.Popen(["matchbox-keyboard", "--xid"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.keyboard = gtk.Socket()
self.window.add(self.keyboard)
self.keyboard.add_id(int(p.stdout.readline()))
self.keyboard.show()
self.window.show()
def main(self):
# All PyGTK applications must have a gtk.main(). Control ends here
# and waits for an event to occur (like a key press or mouse event).
gtk.main()
# If the program is run directly or passed as an argument to the python
# interpreter then create a HelloWorld instance and show it
if __name__ == "__main__":
hello = HelloWorld()
hello.main()
|
from django.apps import AppConfig
class DjangoTimestampsConfig(AppConfig):
name = 'django_timestamps'
|
#! /usr/bin/env python
# vim: set fileencoding=utf-8
# Python 2 or 3
# TODO: handle encoding/decoding in main()
"""syllable_count_eng_bf.py -- count syllables in English word.
This version uses Bloom filters to fix up counts that the
basic version gets wrong.
"""
## Copyright © 2018 Raymond D. Gardner
## Licensed under the MIT License
from __future__ import division, print_function, unicode_literals
import syllable_count_eng
import Bloom_filter
from Bloom_filter_data import undercount_filter, overcount_filter
undercount_bf = Bloom_filter.create_and_load_Bloom_filter(undercount_filter)
overcount_bf = Bloom_filter.create_and_load_Bloom_filter(overcount_filter)
def syllable_count_eng_bf(word):
n = syllable_count_eng.syllable_count_eng(word)
word = word.lower()
if word in undercount_bf:
n += 1
elif word in overcount_bf:
n -= 1
if n <= 0:
n = 1
return n
if __name__ == '__main__':
import sys, io
with io.open(sys.argv[1], encoding='utf8') as fp:
for word in fp.read().split():
k = syllable_count_eng_bf(word)
print(k, word)
|
"""
Module Info Message
:author: Zilvinas Binisevicius <zilvinas@binis.me>
"""
import json
import domipy
class InfoMessage(domipy.Message):
"""
Generic info message
"""
def __init__(self, moduleType=None, data=None):
domipy.Message.__init__(self)
self._message = ''
self.moduleType = 'INFO'
self._message = data
def populate(self, serialNumber, dataType, dataString):
pass
def to_json(self):
"""
:return: str
"""
json_dict = self.to_json_basic()
json_dict['info_message'] = self._message
return json.dumps(json_dict)
domipy.register_command("INF", InfoMessage)
domipy.register_command("!! ", InfoMessage)
domipy.register_command("APP", InfoMessage)
|
#!/usr/bin/env python
import os
import tempfile
from pathlib import Path
import requests
from telegram import Document, Update
from telegram.ext import CallbackContext, ConversationHandler, Defaults, \
MessageHandler, Updater, CommandHandler, Filters
from depixlib.LoadedImage import *
from depixlib.functions import *
DEPIX_SEARCH_IMAGES_PATH = Path(os.environ.get('DEPIX_SEARCH_IMAGES_PATH'))
BOT_TOKEN = os.environ.get('BOT_TOKEN')
SEARCH_IMAGES_TO_COMMANDS = dict()
for root, _, files in os.walk(DEPIX_SEARCH_IMAGES_PATH):
for file in files:
if file.endswith('.png'):
SEARCH_IMAGES_TO_COMMANDS[len(SEARCH_IMAGES_TO_COMMANDS)] = file
SENDING_IMAGE, SEARCH_IMAGE_CHOICE, PROCESS_IMAGE = range(3)
def start_handler(update: Update, _: CallbackContext) -> int:
update.message.reply_text('Please, send a photo:')
return SENDING_IMAGE
def image_handler(update: Update, context: CallbackContext) -> int:
text_builder = ['Please, select a search image:\n']
text_builder += [f'/{number} – {name}' for number, name in SEARCH_IMAGES_TO_COMMANDS.items()]
if update.message.document:
context.user_data['image'] = update.message.document
else:
best_photo = update.message.photo[0]
for photo in update.message.photo[1::]:
            if photo.height * photo.width > best_photo.height * best_photo.width:
best_photo = photo
context.user_data['image'] = best_photo
update.message.reply_text('\n'.join(text_builder))
return SEARCH_IMAGE_CHOICE
def search_image_handler(update: Update, context: CallbackContext) -> int:
image = context.user_data['image']
image_details = image.file_name if isinstance(image, Document) else f'{image.width}x{image.height}'
search_image_id = int(update.effective_message.text[1::])
context.user_data['selected_search_image_id'] = search_image_id
text = f'Almost done to start! \n\n' \
f'Image: {image_details} \n' \
f'Selected search image: {SEARCH_IMAGES_TO_COMMANDS[search_image_id]}\n\n' \
f'/done'
update.message.reply_text(text)
return PROCESS_IMAGE
def process_handler(update: Update, context: CallbackContext) -> int:
reply = update.message.reply_text
search_image_filename = SEARCH_IMAGES_TO_COMMANDS[context.user_data['selected_search_image_id']]
path_to_search_image = DEPIX_SEARCH_IMAGES_PATH.joinpath(search_image_filename)
uploaded_photo = context.user_data['image'].get_file()
with tempfile.NamedTemporaryFile(prefix=uploaded_photo.file_id, suffix='.png') as f:
result = requests.get(uploaded_photo.file_path)
for chunk in result.iter_content(chunk_size=128):
f.write(chunk)
f.seek(0)
reply('Loading pixelated image')
pixelated_image = LoadedImage(f.name)
unpixelated_output_image = pixelated_image.getCopyOfLoadedPILImage()
reply('Loading search image')
search_image = LoadedImage(path_to_search_image)
reply('Finding color rectangles from pixelated space')
# fill coordinates here if not cut out
pixelated_rectangle = Rectangle((0, 0), (pixelated_image.width - 1, pixelated_image.height - 1))
pixelated_sub_rectangles = findSameColorSubRectangles(pixelated_image, pixelated_rectangle)
reply(f'Found {len(pixelated_sub_rectangles)} same color rectangles')
pixelated_sub_rectangles = removeMootColorRectangles(pixelated_sub_rectangles)
reply(f'{len(pixelated_sub_rectangles)} rectangles left after moot filter')
rectangle_size_occurences = findRectangleSizeOccurences(pixelated_sub_rectangles)
reply(f'Found {len(rectangle_size_occurences)} different rectangle sizes')
reply('Finding matches in search image')
rectangle_matches = findRectangleMatches(rectangle_size_occurences, pixelated_sub_rectangles, search_image)
reply('Removing blocks with no matches')
pixelated_sub_rectangles = dropEmptyRectangleMatches(rectangle_matches, pixelated_sub_rectangles)
reply('Splitting single matches and multiple matches')
single_results, pixelated_sub_rectangles = splitSingleMatchAndMultipleMatches(
pixelated_sub_rectangles, rectangle_matches
)
reply(f'[{len(single_results)} straight matches | {len(pixelated_sub_rectangles)} multiple matches]')
reply('Trying geometrical matches on single-match squares')
single_results, pixelated_sub_rectangles = findGeometricMatchesForSingleResults(
single_results, pixelated_sub_rectangles, rectangle_matches
)
reply(f'[{len(single_results)} straight matches | {len(pixelated_sub_rectangles)} multiple matches]')
reply('Trying another pass on geometrical matches')
single_results, pixelated_sub_rectangles = findGeometricMatchesForSingleResults(
single_results, pixelated_sub_rectangles, rectangle_matches
)
reply(f'[{len(single_results)} straight matches | {len(pixelated_sub_rectangles)} multiple matches]')
reply('Writing single match results to output')
writeFirstMatchToImage(single_results, rectangle_matches, search_image, unpixelated_output_image)
reply('Writing average results for multiple matches to output')
writeAverageMatchToImage(pixelated_sub_rectangles, rectangle_matches, search_image, unpixelated_output_image)
reply('Saving output image')
with tempfile.NamedTemporaryFile(prefix=f'{uploaded_photo.file_id}-output', suffix='.png') as fo:
unpixelated_output_image.save(fo.name)
reply('Sending output image')
update.message.reply_photo(fo)
reply('Done')
return ConversationHandler.END
def help_handler(update: Update, _: CallbackContext) -> None:
update.message.reply_text('pong')
def main() -> None:
updater = Updater(BOT_TOKEN, use_context=True, defaults=Defaults(run_async=True))
dispatcher = updater.dispatcher
# dispatcher.add_handler(CommandHandler('start', start_handler))
conv_handler = ConversationHandler(
entry_points=[CommandHandler('start', start_handler)],
states={
SENDING_IMAGE: [
MessageHandler(Filters.photo, image_handler),
MessageHandler(Filters.document, image_handler),
],
SEARCH_IMAGE_CHOICE: [
MessageHandler(
Filters.command & Filters.regex(r'^/\d$'), search_image_handler
)
],
PROCESS_IMAGE: [
CommandHandler('done', process_handler)
]
},
fallbacks=[],
)
dispatcher.add_handler(conv_handler)
dispatcher.add_handler(MessageHandler(Filters.all, help_handler))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
|
# This function returns pointer to LCA of two given
# values n1 and n2
# This function assumes that n1 and n2 are present in
# Binary Tree
def findLCA(root, n1, n2):
# Base Case
if root is None:
return None
    # If either n1 or n2 matches root's key, report the presence by
    # returning root. (Note that if one key is an ancestor of the other,
    # the ancestor key becomes the LCA.)
if root.key == n1 or root.key == n2:
return root
# Look for keys in left and right subtrees
left_lca = findLCA(root.left, n1, n2)
right_lca = findLCA(root.right, n1, n2)
    # If both of the above calls return non-None, then one key is
    # present in one subtree and the other key in the other subtree,
    # so this node is the LCA
if left_lca and right_lca:
return root
# Otherwise check if left subtree or right subtree is LCA
return left_lca if left_lca is not None else right_lca
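# --- Illustrative usage sketch (not part of the original snippet) ---
# The Node class below is a stand-in for whatever binary-tree node type findLCA is used
# with; it only needs the ``key``, ``left`` and ``right`` attributes that findLCA reads.
if __name__ == "__main__":
    class Node:
        def __init__(self, key, left=None, right=None):
            self.key = key
            self.left = left
            self.right = right
    #        1
    #       / \
    #      2   3
    #     / \
    #    4   5
    root = Node(1, Node(2, Node(4), Node(5)), Node(3))
    print(findLCA(root, 4, 5).key)  # -> 2 (lowest common ancestor of leaves 4 and 5)
    print(findLCA(root, 4, 3).key)  # -> 1 (keys live in different subtrees of the root)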
|
# Lists the prosecutors of the state of São Paulo
# Note that, to extract the information, it was first necessary to understand the structure in which the data is stored
from urllib.request import urlopen
from bs4 import BeautifulSoup
html = urlopen('https://sismpconsultapublica.mpsp.mp.br/ConsultarDistribuicao/ObterFiltrosPorMembro')
bsObj = BeautifulSoup(html, 'html.parser')
lista = bsObj.findAll('option')
for name in lista:
print(name.get_text())
|
#!/usr/bin/env python3
"""
This is an example of how the pytradfri-library can be used to pair new
devices.
To run the script, do the following:
$ pip3 install pytradfri
$ Download this file (example_pair.py)
$ python3 example_pair.py <IP>
Where <IP> is the address to your IKEA gateway. The first time
running you will be asked to input the 'Security Code' found on
the back of your IKEA gateway.
"""
# Hack to allow relative import above top level package
import sys
import os
folder = os.path.dirname(os.path.abspath(__file__)) # noqa
sys.path.insert(0, os.path.normpath("%s/.." % folder)) # noqa
from pytradfri import Gateway
from pytradfri.command import Command
from pytradfri.const import ROOT_DEVICES
from pytradfri.api.aiocoap_api import APIFactory
from pytradfri.error import PytradfriError
from pytradfri.util import load_json, save_json
import asyncio
import logging
import uuid
import argparse
logging.basicConfig(level=logging.INFO)
CONFIG_FILE = 'tradfri_standalone_psk.conf'
parser = argparse.ArgumentParser()
parser.add_argument('host', metavar='IP', type=str,
help='IP Address of your Tradfri gateway')
parser.add_argument('-K', '--key', dest='key', required=False,
help='Key found on your Tradfri gateway')
args = parser.parse_args()
if args.host not in load_json(CONFIG_FILE) and args.key is None:
print("Please provide the 'Security Code' on the back of your "
"Tradfri gateway:", end=" ")
key = input().strip()
if len(key) != 16:
raise PytradfriError("Invalid 'Security Code' provided.")
else:
args.key = key
async def run(shutdown):
# Assign configuration variables.
# The configuration check takes care they are present.
conf = load_json(CONFIG_FILE)
try:
identity = conf[args.host].get('identity')
psk = conf[args.host].get('key')
api_factory = APIFactory(host=args.host, psk_id=identity, psk=psk)
except KeyError:
identity = uuid.uuid4().hex
api_factory = APIFactory(host=args.host, psk_id=identity)
try:
psk = await api_factory.generate_psk(args.key)
print('Generated PSK: ', psk)
conf[args.host] = {'identity': identity,
'key': psk}
save_json(CONFIG_FILE, conf)
except AttributeError:
raise PytradfriError("Please provide the 'Security Code' on the "
"back of your Tradfri gateway using the "
"-K flag.")
api = api_factory.request
gateway = Gateway()
# end copy/pasted
#
# set and regularly renew the commissioning timeout, remove when done
#
async def keep_commissioning_alive(readiness):
try:
while True:
await api(gateway.set_commissioning_timeout(60))
if readiness is not None:
readiness()
readiness = None
await asyncio.sleep(45)
finally:
await api(gateway.set_commissioning_timeout(00))
commissioning_ready = asyncio.Future()
commissioning = asyncio.Task(keep_commissioning_alive(
lambda: commissioning_ready.set_result(None)))
#
# monitor the device list and give instructions
#
last_devices = None
def devices_updated(result):
nonlocal last_devices
if last_devices is None:
print("Originally, %s device(s) are known" % len(result))
else:
for r in result:
if r not in last_devices:
asyncio.Task(new_device(r))
last_devices = result
async def new_device(devno):
nonlocal commissioning
print("New device, fetching details...", end="", flush=True)
device_command = gateway.get_device(devno)
device = await api(device_command)
print()
print(" New device description: %s" % (device,))
if commissioning:
if device.has_light_control:
print("That was not in the expected sequence: This device was"
" a light and not a controller. You can still pair"
" another controller device.")
else:
print("Found a controller. You can now go ahead and add light"
" bulbs by pairing them to the switch as you would do"
" without a gateway. Press Ctrl-C when done.")
commissioning.cancel()
commissioning = None
                # if you wanted to implement infinite-commissioning mode, you
# should cancel or restart keep_commissioning_alive in a way
# that resets the timeout, because the timeout will have gone
# to 0 the moment the device was added.
else:
if not device.has_light_control:
print("That was unexpected: A controller showed up even though"
" the gateway was not in pairing mode any more.")
else:
print("You can still add more light bulbs; press Ctrl-C when"
" done.")
observe_devices = Command('get', [ROOT_DEVICES], observe=True,
process_result=devices_updated)
await api(observe_devices)
await commissioning_ready
print("Ready to start: Gateway is in commissioning mode.")
print("Pressing the pairing button on a switch, dimmer or motion detector"
" for 10s near the gateway until the gateway blinks fast. A few"
" seconds later, it the new device shows up here. You may need to"
" switch off light bulbs in the immediate vicinity (?).")
#
# run until the outer loop says not to any more
#
    await shutdown
    if commissioning is not None:
        print("Please allow for the commissioning mode to be disabled")
        commissioning.cancel()
    await api_factory.shutdown()
if __name__ == "__main__":
shutdown = asyncio.Future()
main = run(shutdown)
try:
asyncio.get_event_loop().run_until_complete(main)
except KeyboardInterrupt:
shutdown.set_result(None)
asyncio.get_event_loop().run_until_complete(main)
|
# MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import pytest
from art.utils import Deprecated, deprecated, deprecated_keyword_arg
logger = logging.getLogger(__name__)
class TestDeprecated:
"""
Test the deprecation decorator functions and methods.
"""
def test_deprecated_simple(self):
@deprecated("1.3.0")
def simple_addition(a, b):
return a + b
with pytest.deprecated_call():
simple_addition(1, 2)
def test_deprecated_reason_keyword(self, recwarn):
@deprecated("1.3.0", reason="With some reason message.")
def simple_addition(a, b):
return a + b
warn_msg_expected = (
"Function 'simple_addition' is deprecated and will be removed in future release 1.3.0."
"\nWith some reason message."
)
simple_addition(1, 2)
warn_obj = recwarn.pop(DeprecationWarning)
assert str(warn_obj.message) == warn_msg_expected
def test_deprecated_replaced_by_keyword(self, recwarn):
@deprecated("1.3.0", replaced_by="sum")
def simple_addition(a, b):
return a + b
warn_msg_expected = (
"Function 'simple_addition' is deprecated and will be removed in future release 1.3.0."
" It will be replaced by 'sum'."
)
simple_addition(1, 2)
warn_obj = recwarn.pop(DeprecationWarning)
assert str(warn_obj.message) == warn_msg_expected
class TestDeprecatedKeyword:
"""
Test the deprecation decorator for keyword arguments.
"""
def test_deprecated_keyword_used(self):
@deprecated_keyword_arg("a", "1.3.0")
def simple_addition(a=Deprecated, b=1):
result = a if a is Deprecated else a + b
return result
with pytest.deprecated_call():
simple_addition(a=1)
def test_deprecated_keyword_not_used(self, recwarn):
@deprecated_keyword_arg("b", "1.3.0")
def simple_addition(a, b=Deprecated):
result = a if b is Deprecated else a + b
return result
simple_addition(1)
assert len(recwarn) == 0
def test_reason(self, recwarn):
@deprecated_keyword_arg("a", "1.3.0", reason="With some reason message.")
def simple_addition(a=Deprecated, b=1):
result = a if a is Deprecated else a + b
return result
warn_msg_expected = (
"Keyword argument 'a' in 'simple_addition' is deprecated and will be removed in future release 1.3.0."
"\nWith some reason message."
)
simple_addition(a=1)
warn_obj = recwarn.pop(DeprecationWarning)
assert str(warn_obj.message) == warn_msg_expected
def test_replaced_by(self, recwarn):
@deprecated_keyword_arg("b", "1.3.0", replaced_by="c")
def simple_addition(a=1, b=Deprecated, c=1):
result = a + c if b is Deprecated else a + b
return result
warn_msg_expected = (
"Keyword argument 'b' in 'simple_addition' is deprecated and will be removed in future release 1.3.0."
" It will be replaced by 'c'."
)
simple_addition(a=1, b=1)
warn_obj = recwarn.pop(DeprecationWarning)
assert str(warn_obj.message) == warn_msg_expected
def test_replaced_by_keyword_missing_signature_error(self, recwarn):
@deprecated_keyword_arg("b", "1.3.0", replaced_by="c")
def simple_addition(a=1, b=Deprecated):
result = a if b is Deprecated else a + b
return result
exc_msg = "Deprecated keyword replacement not found in function signature."
with pytest.raises(ValueError, match=exc_msg):
simple_addition(a=1)
def test_deprecated_keyword_default_value_error(self):
@deprecated_keyword_arg("a", "1.3.0")
def simple_addition(a=None, b=1):
result = a if a is None else a + b
return result
exc_msg = "Deprecated keyword argument must default to the Decorator singleton."
with pytest.raises(ValueError, match=exc_msg):
simple_addition(a=1)
|
"""Provides helpers for Z-Wave JS device automations."""
from __future__ import annotations
from typing import cast
import voluptuous as vol
from zwave_js_server.const import ConfigurationValueType
from zwave_js_server.model.node import Node
from zwave_js_server.model.value import ConfigurationValue
from homeassistant.config_entries import ConfigEntryState
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import device_registry as dr
from .const import DOMAIN
NODE_STATUSES = ["asleep", "awake", "dead", "alive"]
CONF_SUBTYPE = "subtype"
CONF_VALUE_ID = "value_id"
VALUE_ID_REGEX = r"([0-9]+-[0-9]+-[0-9]+-).+"
def get_config_parameter_value_schema(node: Node, value_id: str) -> vol.Schema | None:
"""Get the extra fields schema for a config parameter value."""
config_value = cast(ConfigurationValue, node.values[value_id])
min_ = config_value.metadata.min
max_ = config_value.metadata.max
if config_value.configuration_value_type in (
ConfigurationValueType.RANGE,
ConfigurationValueType.MANUAL_ENTRY,
):
return vol.All(vol.Coerce(int), vol.Range(min=min_, max=max_))
if config_value.configuration_value_type == ConfigurationValueType.ENUMERATED:
return vol.In({int(k): v for k, v in config_value.metadata.states.items()})
return None
def generate_config_parameter_subtype(config_value: ConfigurationValue) -> str:
"""Generate the config parameter name used in a device automation subtype."""
parameter = str(config_value.property_)
if config_value.property_key:
# Property keys for config values are always an int
assert isinstance(config_value.property_key, int)
parameter = f"{parameter}[{hex(config_value.property_key)}]"
return f"{parameter} ({config_value.property_name})"
@callback
def async_bypass_dynamic_config_validation(hass: HomeAssistant, device_id: str) -> bool:
"""Return whether device's config entries are not loaded."""
dev_reg = dr.async_get(hass)
if (device := dev_reg.async_get(device_id)) is None:
raise ValueError(f"Device {device_id} not found")
entry = next(
(
config_entry
for config_entry in hass.config_entries.async_entries(DOMAIN)
if config_entry.entry_id in device.config_entries
and config_entry.state == ConfigEntryState.LOADED
),
None,
)
return not entry
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 03 12:12:46 2017
@author: tony.withers@uwo.ca
Functions to calculate H2O molar volume (PSvolume) and fugacity (PSfugacity)
using the Pitzer and Sterner equation of state.
Pitzer, K.S. and Sterner, S.M., 1994. Equations of state valid
continuously from zero to extreme pressures for H2O and CO2.
Journal of Chemical Physics. 101: 3111-3116.
"""
import math
from scipy import optimize
coeff=[]
coeff.append([0,0,0.24657688e6,0.51359951e2,0,0])
coeff.append([0,0,0.58638965e0,-0.28646939e-2,0.31375577e-4,0])
coeff.append([0,0,-0.62783840e1,0.14791599e-1,0.35779579e-3,0.15432925e-7])
coeff.append([0,0,0,-0.42719875e0,-0.16325155e-4,0])
coeff.append([0,0,0.56654978e4,-0.16580167e2,0.76560762e-1,0])
coeff.append([0,0,0,0.10917883e0,0,0])
coeff.append([0.38878656e13,-0.13494878e9,0.30916564e6,0.75591105e1,0,0])
coeff.append([0,0,-0.65537898e5,0.18810675e3,0,0])
coeff.append([-0.14182435e14,0.18165390e9,-0.19769068e6,-0.23530318e2,0,0])
coeff.append([0,0,0.92093375e5,0.12246777e3,0,0])
def PSeos(volume, temperature, targetP): # cc/mol, Kelvins, bars
R=8314510 # Pa.cc/K/mol
den=1/volume # mol/cc
c=[]
for i in range(10):
c.insert(i,coeff[i][0]*temperature**-4+coeff[i][1]*temperature**-2
+coeff[i][2]*temperature**-1+coeff[i][3]
+coeff[i][4]*temperature+coeff[i][5]*temperature**2)
pressure=(den+c[0]*den**2-den**2*((c[2]+2*c[3]*den+3*c[4]*den**2
+4*c[5]*den**3)/(c[1]+c[2]*den+c[3]*den**2+c[4]*den**3
+c[5]*den**4)**2)+c[6]*den**2*math.exp(-c[7]*den)
+c[8]*den**2*math.exp(-c[9]*den))*R*temperature/1e5
return pressure-targetP # bars
def PSvolume(pressure, temperature): # bars, Kelvins
volume=optimize.root(PSeos, 10, args = (temperature, pressure))
return volume.x
def PSfugacity(pressure, temperature): # bars, Kelvins
R=8314510 # Pa.cc/K/mol
c=[]
for i in range(10):
c.insert(i,coeff[i][0]*temperature**-4+coeff[i][1]*temperature**-2
+coeff[i][2]*temperature**-1+coeff[i][3]
+coeff[i][4]*temperature+coeff[i][5]*temperature**2)
volume=PSvolume(pressure, temperature)
den=1/volume # mol/cc
fug=math.exp(math.log(den)+c[0]*den+(1/(c[1]+c[2]*den+c[3]*den**2
+c[4]*den**3+c[5]*den**4)-1/c[1])
-c[6]/c[7]*(math.exp(-c[7]*den)-1)
-c[8]/c[9]*(math.exp(-c[9]*den)-1)
+pressure*1e5/(den*R*temperature)
+math.log(R*temperature)-1)/1e5
return fug # bars
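if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): the pressure/temperature
    # point below is an arbitrary illustration of the helpers defined above.
    P, T = 10000.0, 1273.15         # bars, Kelvins
    V = PSvolume(P, T)              # molar volume in cc/mol (1-element array from optimize.root)
    f = PSfugacity(P, T)            # fugacity in bars
    print("V = %.3f cc/mol, fugacity = %.1f bar" % (V[0], f))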
|
from typing import Any, Dict
from cyan.event import Event, EventInfo, Intent
from cyan.model.member import Member
class _MemberEvent(Event):
async def _parse_data(self, data: Dict[str, Any]) -> Member:
guild_identifier = data["guild_id"]
guild = await self._bot.get_guild(guild_identifier)
return Member(self._bot, guild, data)
class MemberJoinedEvent(_MemberEvent):
"""
    Triggered when a new member joins the guild.
    The data type passed to the callback is `Member`.
"""
@staticmethod
def get_event_info() -> EventInfo:
return EventInfo("GUILD_MEMBER_ADD", Intent.MEMBER)
class MemberUpdatedEvent(_MemberEvent):
"""
    Triggered when a member's information is updated.
    The data type passed to the callback is `Member`.
"""
@staticmethod
def get_event_info() -> EventInfo:
return EventInfo("GUILD_MEMBER_UPDATE", Intent.MEMBER)
class MemberLeftEvent(_MemberEvent):
"""
    Triggered when a member leaves the guild.
    The data type passed to the callback is `Member`.
"""
@staticmethod
def get_event_info() -> EventInfo:
return EventInfo("GUILD_MEMBER_REMOVE", Intent.MEMBER)
|
# coding=utf-8
from setuptools import setup, find_packages
from os.path import realpath, dirname
from os import listdir
root_dir = dirname(realpath(__file__))
# data_path = '/countryinfo/data'
data_dir = root_dir+'/countryinfo/data'
# read readme file
with open('README.rst', encoding='utf-8') as f:
long_description = f.read()
# package data file
data_files = ['countryinfo/data/'+file for file in listdir(data_dir) if file.endswith(".json")]
setup(
name='countryinfo',
version='0.1.2',
python_requires='>3.0.0',
    packages=find_packages(
        include=['countryinfo'],
        exclude=['tests']
    ),
include_package_data=True,
test_suite="tests.Tests",
data_files=[("data", data_files)], # package data files
url='https://github.com/porimol/countryinfo',
license='MIT License',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5'
],
    author='Porimol Chandro',
    author_email='porimolchandroroy@gmail.com',
description='countryinfo is a python module for returning data about countries, ISO info and states/provinces within them.',
long_description=long_description,
keywords = ['countryinfo'],
install_requires=[],
)
|
import gzip
import os
import pickle
from typing import Dict, List
import jsonlines # type: ignore
from csvnpm.binary.dire_types import TypeLib
from csvnpm.binary.function import CollectedFunction, Function
from csvnpm.binary.ida_ast import AST
from csvnpm.ida import ida_lines
from csvnpm.ida import idaapi as ida
from csvnpm.ida import idautils
from .collect import Collector
class CollectDecompiler(Collector):
"""Class for collecting decompiler-specific information"""
def __init__(self):
print("Initializing collect decompiler")
super().__init__()
print("Loading functions")
# Load the functions collected by CollectDebug
with open(os.environ["FUNCTIONS"], "rb") as functions_fh:
self.debug_functions: Dict[int, Function] = pickle.load(functions_fh)
print("Done")
self.functions: List[CollectedFunction] = list()
self.output_file_name = os.path.join(
os.environ["OUTPUT_DIR"],
"bins",
os.environ["PREFIX"] + ".jsonl.gz",
)
def write_info(self) -> None:
with gzip.open(self.output_file_name, "wt") as output_file:
with jsonlines.Writer(output_file, compact=True) as writer:
for cf in self.functions:
writer.write(cf.to_json())
def activate(self, ctx) -> int:
"""Collects types, user-defined variables, their locations in addition to the
AST and raw code.
"""
print("Collecting vars and types.")
for ea in (ea for ea in idautils.Functions() if ea in self.debug_functions):
# Decompile
f = ida.get_func(ea)
cfunc = None
try:
cfunc = ida.decompile(f)
except ida.DecompilationFailure:
continue
if cfunc is None:
continue
# Function info
name: str = ida.get_func_name(ea)
self.type_lib.add_ida_type(cfunc.type.get_rettype())
return_type = TypeLib.parse_ida_type(cfunc.type.get_rettype())
arguments = self.collect_variables(
f.frsize, cfunc.get_stkoff_delta(), cfunc.arguments
)
local_vars = self.collect_variables(
f.frsize,
cfunc.get_stkoff_delta(),
[v for v in cfunc.get_lvars() if not v.is_arg_var],
)
raw_code = ""
for line in cfunc.get_pseudocode():
raw_code += f"{' '.join(ida_lines.tag_remove(line.line).split())}\n"
ast = AST(function=cfunc)
decompiler = Function(
ast=ast,
name=name,
return_type=return_type,
arguments=arguments,
local_vars=local_vars,
raw_code=raw_code,
)
self.functions.append(
CollectedFunction(
ea=ea,
debug=self.debug_functions[ea],
decompiler=decompiler,
)
)
self.write_info()
return 1
def main():
ida.auto_wait()
if not ida.init_hexrays_plugin():
ida.load_plugin("hexrays")
ida.load_plugin("hexx64")
if not ida.init_hexrays_plugin():
print("Unable to load Hex-rays")
ida.qexit(1)
else:
print(f"Hex-rays version {ida.get_hexrays_version()}")
decompiler = CollectDecompiler()
decompiler.activate(None)
print("Done with activate")
ida.qexit(0)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse, os, re, subprocess
from collections import defaultdict
script_path = os.path.dirname(os.path.realpath(__file__))
root_dir = os.path.dirname(os.path.dirname(os.path.dirname(script_path)))
projects = ['alfresco-community-repo', 'alfresco-enterprise-repo', 'alfresco-enterprise-share', 'acs-community-packaging', 'acs-packaging']
project_dependencies = {'alfresco-community-repo': ['alfresco-enterprise-repo', 'acs-community-packaging'], 'alfresco-enterprise-repo': ['acs-packaging'], 'alfresco-enterprise-share': ['acs-packaging']}
project_dir = {project: os.path.join(root_dir, project) for project in projects}
version_string = {project: '<dependency.{0}.version>{{}}</dependency.{0}.version>'.format(project) for project in projects}
parser = argparse.ArgumentParser(description='Find the tags that contain commits referencing a ticket.')
parser.add_argument('-j', '--jira', help='The ticket number to search for.', required=True)
parser.add_argument('-a', '--all', action='store_true', help='Display all releases containing fix.')
parser.add_argument('-r', '--release', action='store_true', help='Only consider full releases.')
parser.add_argument('-p', '--packaged', action='store_true', help='Include information about how the commit is packaged.')
parser.add_argument('-s', '--skipfetch', action='store_true', help='Skip the git fetch step - only include commits that are stored locally.')
args = parser.parse_args()
# The filter to use to avoid considering test tags.
version_filter = r'^[0-9]+(\.[0-9]+)*$' if args.release else r'^[0-9]+(\.[0-9]+)*(-(A|M|RC)[0-9]+)?$'
def run_command(command_parts, project):
"""Run the command and return the output string."""
output = subprocess.run(command_parts, cwd=project_dir[project], capture_output=True)
return output.stdout.decode('utf-8')
def run_list_command(command_parts, project):
"""Run the command and return the lines of the output as a list of strings."""
output = run_command(command_parts, project).strip().split('\n')
if '' in output:
output.remove('')
return output
def compare_version_part(a_part, b_part):
"""Compare two parts of a version number and return the difference (taking into account
versions like 7.0.0-M1)."""
try:
a_bits = a_part.split('-')
b_bits = b_part.split('-')
version_difference = int(a_bits[0]) - int(b_bits[0])
if version_difference != 0 or (len(a_bits) == 1 and len(b_bits) == 1):
return version_difference
if len(a_bits) != len(b_bits):
# Fewer parts indicates a later version (e.g. '7.0.0' is later than '7.0.0-M1')
return len(b_bits) - len(a_bits)
# If letter doesn't match then we can't compare the versions.
a_letter = a_bits[1][0]
b_letter = b_bits[1][0]
if a_letter != b_letter:
return 0
# Try to get number from after M, A or RC and compare this.
a_number_start = [char.isdigit() for char in a_bits[1]].index(True)
b_number_start = [char.isdigit() for char in b_bits[1]].index(True)
return int(a_bits[1][a_number_start:]) - int(b_bits[1][b_number_start:])
except ValueError:
# If the strings aren't in the format we're expecting then we can't compare them.
return 0
def tag_before(tag_a, tag_b):
"""Return True if the version number from tag_a is lower than tag_b."""
a_parts = list(tag_a.split('.'))
b_parts = list(tag_b.split('.'))
for part_index, b_part in enumerate(b_parts):
if len(a_parts) <= part_index:
return True
difference = compare_version_part(a_parts[part_index], b_part)
if difference < 0:
return True
elif difference > 0:
return False
return len(a_parts) <= len(b_parts)
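# Illustrative examples (not from the original script) of the ordering implemented by
# compare_version_part and tag_before above:
#   tag_before('7.0.0-M1', '7.0.0') -> True   (a milestone precedes the full release)
#   tag_before('7.0.1', '7.0.0')    -> False  (higher patch version)
#   tag_before('7.0', '7.0.0')      -> True   (equal prefix with fewer parts)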
def reduce_tags(tags):
"""Filter a set of tags to return only those that aren't descendents from others in the list."""
reduced_tags = []
for tag_a in tags:
include = True
for tag_b in tags:
if tag_a == tag_b:
continue
if not tag_before(tag_a, tag_b):
include = False
break
if include:
reduced_tags.append(tag_a)
return reduced_tags
def find_tags_containing(project, commit):
"""Find all tags containing the given commit. Returns the full list and a condensed list (excluding tags 'after' other tags in the list)."""
tags = run_list_command(['git', 'tag', '--contains', commit], project)
# The packaging projects had a different format for older tags.
if project in ['acs-packaging', 'acs-community-packaging']:
        # Remove the '<project>-' prefix if it's present.
tags = list(map(lambda tag: tag.replace('{}-'.format(project), ''), tags))
# Exclude tags that aren't just chains of numbers with an optional suffix.
tags = list(filter(lambda tag: re.match(version_filter, tag), tags))
# Filter out tags that are before other tags.
reduced_tags = reduce_tags(tags)
return tags, reduced_tags
for project in projects:
if not args.skipfetch:
run_command(['git', 'fetch'], project)
commits = run_list_command(['git', 'rev-list', '--all', '--grep', args.jira], project)
for original_commit in commits:
tags, reduced_tags = find_tags_containing(project, original_commit)
tag_info = ', '.join(tags if args.all else reduced_tags)
packaging_info = ''
if args.packaged:
pairs = [(project, tag) for tag in reduced_tags]
packaged = defaultdict(set)
while pairs:
dependency, tag = pairs.pop()
if dependency not in project_dependencies.keys():
packaged[dependency].add(tag)
else:
# Try to find pairs from next project up.
for ancestor_project in project_dependencies[dependency]:
commits = run_list_command(['git', 'log', '--all', '--pretty=format:%h', '-S', version_string[dependency].format(tag), '--', 'pom.xml'], ancestor_project)
for commit in commits:
_, found_tags = find_tags_containing(ancestor_project, commit)
pairs += [(ancestor_project, found_tag) for found_tag in found_tags]
pairs = list(set(pairs))
if packaged:
packaged_set = set()
for packaging_project, packaged_tags in packaged.items():
for packaged_tag in reduce_tags(packaged_tags):
packaged_set.add((packaging_project, packaged_tag))
packaging_info = ' ({})'.format(', '.join('{}:{}'.format(*pair) for pair in packaged_set))
print('{:.7s} is in {}: {}{}'.format(original_commit, project, tag_info, packaging_info))
|
from django.urls import path
from label import views
#from label import processing
import os
urlpatterns = [
path('', views.index, name='index'),
path('home/', views.homepage, name='homepage'),
path('user_login/', views.user_login, name='user_login'),
path('development_tracker/', views.development_tracker, name='developmentTracker'),
path('qgis_support/', views.qgis_support, name='qgisSupport'),
path('labelme_support/', views.labelme_support, name='labelmeSupport'),
path('qgis_support_response/', views.qgis_response, name='qgisResponse'),
path('get_csv/', views.get_csv, name='getCsv'),
path('get_mask/', views.get_mask, name='getPng'),
path('get_json/', views.get_json, name='getJson'),
path('labelme_support_response/', views.labelme_response, name='labelmeResponse'),
path('development_tracker_response/', views.development_tracker_response, name='developmentTrackerResponse'),
]
|
from django.conf import settings
from django.test import TestCase, override_settings
from recruitment.templatetags.instructorrecruitment import (
is_instructor_recruitment_enabled,
)
class TestInstructorRecruitmentTemplateTags(TestCase):
def test_feature_flag_enabled(self) -> None:
with self.settings(INSTRUCTOR_RECRUITMENT_ENABLED=False):
self.assertEqual(is_instructor_recruitment_enabled(), False)
with self.settings(INSTRUCTOR_RECRUITMENT_ENABLED=True):
self.assertEqual(is_instructor_recruitment_enabled(), True)
@override_settings()
def test_feature_flag_removed(self) -> None:
del settings.INSTRUCTOR_RECRUITMENT_ENABLED
self.assertEqual(is_instructor_recruitment_enabled(), False)
|
import pandas as pd
import numpy as np
from keras.models import Sequential
import tensorflow as tf
import os
from Bio import SeqIO
from keras.preprocessing.text import text_to_word_sequence
from keras.preprocessing.text import one_hot
from keras.preprocessing import sequence
# load and preprocess
# insert here the csv created via TrainingDataCreation.py
df = pd.read_csv("inserted.csv")
df.columns = ['seq_label','sequence']
print(df['sequence'])
from textwrap import wrap
# k-mer size used when cutting the sequences
kmer_size = 1
# cut each sequence into k-mers
df['sequence'] = df.apply(lambda x: wrap(x['sequence'], kmer_size), axis=1)
df['sequence'] = [','.join(map(str, l)) for l in df['sequence']]
max_length = df.sequence.map(lambda x: len(x)).max()
max_length = max_length/kmer_size
df['sequence'] = df.apply(lambda x: text_to_word_sequence(x['sequence'], split=','), axis=1)
df['sequence'] = df['sequence'].astype(str)
vocab_max = 4 ** kmer_size
print(vocab_max)
# integer encode the document
df['sequence'] = df.apply(lambda x: one_hot(x['sequence'], round(vocab_max)), axis=1)
from sklearn.utils import shuffle, compute_class_weight
# shuffling gains some more accuracy
df = shuffle(df)
from keras.utils import np_utils
from sklearn.preprocessing import LabelEncoder
dataset = df.values
Y = dataset[:,0]
encoder_label = LabelEncoder()
encoder_label.fit(Y)
encoded_Y = encoder_label.transform(Y)
dummy_y = np_utils.to_categorical(encoded_Y)
target_softmax = dummy_y
from sklearn.utils import class_weight
# max_lengthtest can be used for a fixed length; replace max_length in pad_sequences with it
max_lengthtest = 150
train_numpybig = df["sequence"].values
train_numpybig = sequence.pad_sequences(train_numpybig,int(max_length),padding='post',truncating='post')
print(target_softmax)
print(train_numpybig)
from tensorflow.keras import layers
# An integer input for vocab indices.
inputs = tf.keras.Input(shape=(None,), dtype="int64")
# Next, we add a layer to map those vocab indices into a 64-dimensional embedding space.
x = layers.Embedding(vocab_max,64)(inputs)
x = layers.Dropout(0.5)(x)
# Conv1D + global max pooling
x = layers.Conv1D(128, 10, padding="valid", activation="relu", strides=3)(x)
x = layers.Conv1D(128, 10, padding="valid", activation="relu", strides=3)(x)
x = layers.GlobalMaxPooling1D()(x)
# We add a vanilla hidden layer:
x = layers.Dense(128, activation="relu")(x)
x = layers.Dropout(0.5)(x)
# We project onto an output layer with one unit per class and squash it with a sigmoid:
predictions = layers.Dense(target_softmax.shape[1], activation="sigmoid", name="predictions")(x)
model = tf.keras.Model(inputs, predictions)
# Compile the model with binary crossentropy loss and an adam optimizer.
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
# train with the full data; add a validation split in fit() or use a separate test set
model.fit(train_numpybig,target_softmax,epochs=10,batch_size=2)
scores = model.evaluate(train_numpybig,target_softmax, verbose=0)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
# save model and architecture to single file
model.save("modelconv.h5")
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from time import sleep
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from appium.webdriver.common.multi_action import MultiAction
from test.functional.android import desired_capabilities
# the emulator is sometimes slow and needs time to think
SLEEPY_TIME = 1
class MultiActionTests(unittest.TestCase):
def setUp(self):
desired_caps = desired_capabilities.get_desired_capabilities('ApiDemos-debug.apk')
self.driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
def tearDown(self):
self.driver.quit()
def test_parallel_actions(self):
el1 = self.driver.find_element_by_name('Content')
el2 = self.driver.find_element_by_name('Animation')
self.driver.scroll(el1, el2)
el = self.driver.find_element_by_name('Views')
action = TouchAction(self.driver)
action.tap(el).perform()
el = self.driver.find_element_by_name('Expandable Lists')
# simulate a swipe/scroll
action.press(el).move_to(x=100, y=-1000).release().perform()
el = self.driver.find_element_by_name('Splitting Touches across Views')
action.tap(el).perform()
els = self.driver.find_elements_by_class_name('android.widget.ListView')
a1 = TouchAction()
a1.press(els[0]) \
.move_to(x=10, y=0).move_to(x=10, y=-75).move_to(x=10, y=-600).release()
a2 = TouchAction()
a2.press(els[1]) \
.move_to(x=10, y=10).move_to(x=10, y=-300).move_to(x=10, y=-600).release()
ma = MultiAction(self.driver, els[0])
ma.add(a1, a2)
ma.perform()
def test_actions_with_waits(self):
el1 = self.driver.find_element_by_name('Content')
el2 = self.driver.find_element_by_name('Animation')
self.driver.scroll(el1, el2)
el = self.driver.find_element_by_name('Views')
action = TouchAction(self.driver)
action.tap(el).perform()
el = self.driver.find_element_by_name('Expandable Lists')
# simulate a swipe/scroll
action.press(el).move_to(x=100, y=-1000).release().perform()
el = self.driver.find_element_by_name('Splitting Touches across Views')
action.tap(el).perform()
els = self.driver.find_elements_by_class_name('android.widget.ListView')
a1 = TouchAction()
a1.press(els[0]) \
.move_to(x=10, y=0) \
.move_to(x=10, y=-75) \
.wait(1000) \
.move_to(x=10, y=-600) \
.release()
a2 = TouchAction()
a2.press(els[1]) \
.move_to(x=10, y=10) \
.move_to(x=10, y=-300) \
.wait(500) \
.move_to(x=10, y=-600) \
.release()
ma = MultiAction(self.driver, els[0])
ma.add(a1, a2)
ma.perform()
def test_driver_multi_tap(self):
el = self.driver.find_element_by_name('Graphics')
action = TouchAction(self.driver)
action.tap(el).perform()
els = self.driver.find_elements_by_class_name('android.widget.TextView')
self.driver.scroll(els[len(els) - 1], els[0])
els = self.driver.find_elements_by_class_name('android.widget.TextView')
if els[len(els) - 1].get_attribute('name') != 'Xfermodes':
self.driver.scroll(els[len(els) - 1], els[0])
el = self.driver.find_element_by_name('Touch Paint')
action.tap(el).perform()
positions = [(100, 200), (100, 400)]
# makes two dots in the paint program
# THE TEST MUST BE WATCHED TO CHECK IF IT WORKS
self.driver.tap(positions)
sleep(10)
def test_driver_pinch(self):
el1 = self.driver.find_element_by_name('Content')
el2 = self.driver.find_element_by_name('Animation')
self.driver.scroll(el1, el2)
el = self.driver.find_element_by_name('Views')
action = TouchAction(self.driver)
action.tap(el).perform()
els = self.driver.find_elements_by_class_name('android.widget.TextView')
self.driver.scroll(els[len(els) - 1], els[0])
els = self.driver.find_elements_by_class_name('android.widget.TextView')
if els[len(els) - 1].get_attribute('name') != 'WebView':
self.driver.scroll(els[len(els) - 1], els[0])
el = self.driver.find_element_by_name('WebView')
action.tap(el).perform()
sleep(SLEEPY_TIME)
el = self.driver.find_element_by_id('com.example.android.apis:id/wv1')
self.driver.pinch(element=el)
def test_driver_zoom(self):
el1 = self.driver.find_element_by_name('Content')
el2 = self.driver.find_element_by_name('Animation')
self.driver.scroll(el1, el2)
el = self.driver.find_element_by_name('Views')
action = TouchAction(self.driver)
action.tap(el).perform()
els = self.driver.find_elements_by_class_name('android.widget.TextView')
self.driver.scroll(els[len(els) - 1], els[0])
els = self.driver.find_elements_by_class_name('android.widget.TextView')
if els[len(els) - 1].get_attribute('name') != 'WebView':
self.driver.scroll(els[len(els) - 1], els[0])
el = self.driver.find_element_by_name('WebView')
action.tap(el).perform()
sleep(SLEEPY_TIME)
el = self.driver.find_element_by_id('com.example.android.apis:id/wv1')
self.driver.zoom(element=el)
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(MultiActionTests)
unittest.TextTestRunner(verbosity=2).run(suite)
|
import logging
import autograd.numpy as np
from autograd import grad
from mla.base import BaseEstimator
from mla.metrics.metrics import mean_squared_error, binary_crossentropy
np.random.seed(1000)
class BasicRegression(BaseEstimator):
def __init__(self, lr=0.001, penalty="None", C=0.01, tolerance=0.0001, max_iters=1000):
"""Basic class for implementing continuous regression estimators which
are trained with gradient descent optimization on their particular loss
function.
Parameters
----------
lr : float, default 0.001
Learning rate.
        penalty : str, {'l1', 'l2', None}, default None
Regularization function name.
C : float, default 0.01
The regularization coefficient.
tolerance : float, default 0.0001
If the gradient descent updates are smaller than `tolerance`, then
stop optimization process.
        max_iters : int, default 1000
The maximum number of iterations.
"""
self.C = C
self.penalty = penalty
self.tolerance = tolerance
self.lr = lr
self.max_iters = max_iters
self.errors = []
self.theta = []
self.n_samples, self.n_features = None, None
self.cost_func = None
def _loss(self, w):
raise NotImplementedError()
def init_cost(self):
raise NotImplementedError()
def _add_penalty(self, loss, w):
"""Apply regularization to the loss."""
if self.penalty == "l1":
loss += self.C * np.abs(w[1:]).sum()
elif self.penalty == "l2":
loss += (0.5 * self.C) * (w[1:] ** 2).sum()
return loss
def _cost(self, X, y, theta):
prediction = X.dot(theta)
error = self.cost_func(y, prediction)
return error
def fit(self, X, y=None):
self._setup_input(X, y)
self.init_cost()
self.n_samples, self.n_features = X.shape
# Initialize weights + bias term
self.theta = np.random.normal(size=(self.n_features + 1), scale=0.5)
# Add an intercept column
self.X = self._add_intercept(self.X)
self._train()
@staticmethod
def _add_intercept(X):
b = np.ones([X.shape[0], 1])
return np.concatenate([b, X], axis=1)
def _train(self):
self.theta, self.errors = self._gradient_descent()
logging.info(" Theta: %s" % self.theta.flatten())
def _predict(self, X=None):
X = self._add_intercept(X)
return X.dot(self.theta)
def _gradient_descent(self):
theta = self.theta
errors = [self._cost(self.X, self.y, theta)]
# Get derivative of the loss function
cost_d = grad(self._loss)
for i in range(1, self.max_iters + 1):
# Calculate gradient and update theta
delta = cost_d(theta)
theta -= self.lr * delta
errors.append(self._cost(self.X, self.y, theta))
logging.info("Iteration %s, error %s" % (i, errors[i]))
error_diff = np.linalg.norm(errors[i - 1] - errors[i])
if error_diff < self.tolerance:
logging.info("Convergence has reached.")
break
return theta, errors
class LinearRegression(BasicRegression):
"""Linear regression with gradient descent optimizer."""
def _loss(self, w):
loss = self.cost_func(self.y, np.dot(self.X, w))
return self._add_penalty(loss, w)
def init_cost(self):
self.cost_func = mean_squared_error
class LogisticRegression(BasicRegression):
"""Binary logistic regression with gradient descent optimizer."""
def init_cost(self):
self.cost_func = binary_crossentropy
def _loss(self, w):
loss = self.cost_func(self.y, self.sigmoid(np.dot(self.X, w)))
return self._add_penalty(loss, w)
@staticmethod
def sigmoid(x):
return 0.5 * (np.tanh(0.5 * x) + 1)
def _predict(self, X=None):
X = self._add_intercept(X)
return self.sigmoid(X.dot(self.theta))
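if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): fit LinearRegression on
    # synthetic data; the sizes and true weights below are arbitrary illustrations.
    X = np.random.uniform(-1, 1, size=(100, 3))
    true_w = np.array([2.0, -1.0, 0.5])
    y = X.dot(true_w) + 0.1 * np.random.randn(100)
    model = LinearRegression(lr=0.01, max_iters=2000)
    model.fit(X, y)
    print("Learned theta:", model.theta)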
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: Olivier Noguès
from app import db
import datetime
# -------------------------------------------------------------------------------------------------------------------
# SECTION DEDICATED TO THE DOCUMENTATION
#
class AresDoc(db.Model):
__bind_key__ = 'documentation'
__tablename__ = 'doc'
doc_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
hash_id = db.Column(db.NUMERIC, nullable=False)
src_id = db.Column(db.Integer, nullable=False)
dsc = db.Column(db.Text, nullable=False)
rubric = db.Column(db.String(120), nullable=False)
category = db.Column(db.String(120), nullable=False)
module = db.Column(db.String(50), nullable=False)
cls = db.Column(db.String(50), nullable=False)
fnc = db.Column(db.String(50), nullable=False)
type = db.Column(db.String(20), nullable=False) # FUNCTION, CLASS, MODULE
section = db.Column(db.String(20), nullable=False) # ARES, REPORTS, CONNECTOR...
return_dsc = db.Column(db.Text, nullable=False)
name = db.Column(db.String(120), nullable=False)
hostname = db.Column(db.String(120), nullable=False)
mac_address = db.Column(db.Integer, nullable=False)
lst_mod_dt = db.Column(db.DateTime, nullable=False, default=datetime.datetime.utcnow)
def __init__(self, dsc, rubric, category, module, cls, fnc, doc_type, section, name,
return_dsc, user_name, hostname, mac_address, src_id):
self.dsc, self.rubric, self.category, self.module, self.cls = dsc, rubric, category, module, cls
self.fnc, self.type, self.section, self.name, self.src_id = fnc, doc_type, section, name, src_id
self.hash_id = AresSiphash.SipHash().hashId("%s_%s_%s_%s" % (module, cls, fnc, doc_type) )
self.return_dsc, self.user_name, self.hostname, self.mac_address = return_dsc, user_name, hostname, mac_address
class AresDocView(db.Model):
__bind_key__ = 'documentation'
__tablename__ = 'doc_view'
vw_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
hash_id = db.Column(db.NUMERIC, nullable=False)
user_name = db.Column(db.String(120), nullable=False)
hostname = db.Column(db.String(120), nullable=False)
mac_address = db.Column(db.Integer, nullable=False)
clc_dt = db.Column(db.String(10), nullable=False)
lst_mod_dt = db.Column(db.DateTime, nullable=False, default=datetime.datetime.utcnow)
def __init__(self, module, cls, fnc, doc_type, user_name, hostname, mac_address):
self.user_name, self.hostname, self.mac_address = user_name, hostname, mac_address
        self.hash_id = AresSiphash.SipHash().hashId("%s_%s_%s_%s" % (module, cls, fnc, doc_type))
self.clc_dt = datetime.datetime.today().strftime('%Y-%m-%d')
class AresDocComments(db.Model):
__bind_key__ = 'documentation'
__tablename__ = 'doc_comments'
com_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
hash_id = db.Column(db.NUMERIC, nullable=False)
com = db.Column(db.Text, nullable=False)
status = db.Column(db.Integer, nullable=False) # 1 active, 0 closed
user_name = db.Column(db.String(120), nullable=False)
hostname = db.Column(db.String(120), nullable=False)
mac_address = db.Column(db.Integer, nullable=False)
clc_dt = db.Column(db.String(10), nullable=False)
lst_mod_dt = db.Column(db.DateTime, nullable=False, default=datetime.datetime.utcnow)
def __init__(self, module, cls, fnc, doc_type, com, status, user_name, hostname, mac_address):
self.com, self.status, self.user_name, self.hostname, self.mac_address = com, status, user_name, hostname, mac_address
self.hash_id = AresSiphash.SipHash().hashId("%s_%s_%s_%s" % (module, cls, fnc, doc_type))
self.clc_dt = datetime.datetime.today().strftime('%Y-%m-%d')
class AresDocExamples(db.Model):
__bind_key__ = 'documentation'
__tablename__ = 'doc_examples'
ex_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
hash_id = db.Column(db.NUMERIC, nullable=False)
ex = db.Column(db.Text, nullable=False)
user_name = db.Column(db.String(120), nullable=False)
hostname = db.Column(db.String(120), nullable=False)
mac_address = db.Column(db.Integer, nullable=False)
clc_dt = db.Column(db.String(10), nullable=False)
lst_mod_dt = db.Column(db.DateTime, nullable=False, default=datetime.datetime.utcnow)
def __init__(self, module, cls, fnc, doc_type, ex, user_name, hostname, mac_address):
self.ex, self.user_name, self.hostname, self.mac_address = ex, user_name, hostname, mac_address
self.hash_id = AresSiphash.SipHash().hashId("%s_%s_%s_%s" % (module, cls, fnc, doc_type))
self.clc_dt = datetime.datetime.today().strftime('%Y-%m-%d')
class AresDocTags(db.Model):
__bind_key__ = 'documentation'
__tablename__ = 'doc_static_tags'
ex_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
hash_id = db.Column(db.NUMERIC, nullable=False)
tag = db.Column(db.Text, nullable=False)
user_name = db.Column(db.String(120), nullable=False)
hostname = db.Column(db.String(120), nullable=False)
mac_address = db.Column(db.Integer, nullable=False)
clc_dt = db.Column(db.String(10), nullable=False)
lst_mod_dt = db.Column(db.DateTime, nullable=False, default=datetime.datetime.utcnow)
def __init__(self, module, cls, fnc, doc_type, tag, user_name, hostname, mac_address):
self.tag, self.user_name, self.hostname, self.mac_address = tag, user_name, hostname, mac_address
self.hash_id = AresSiphash.SipHash().hashId("%s_%s_%s_%s" % (module, cls, fnc, doc_type))
self.clc_dt = datetime.datetime.today().strftime('%Y-%m-%d')
class AresDocLinks(db.Model):
__bind_key__ = 'documentation'
__tablename__ = 'doc_links'
link_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
hash_id = db.Column(db.NUMERIC, nullable=False)
link_name = db.Column(db.Text, nullable=False)
link_url = db.Column(db.Text, nullable=False)
user_name = db.Column(db.String(120), nullable=False)
hostname = db.Column(db.String(120), nullable=False)
mac_address = db.Column(db.Integer, nullable=False)
clc_dt = db.Column(db.String(10), nullable=False)
lst_mod_dt = db.Column(db.DateTime, nullable=False, default=datetime.datetime.utcnow)
def __init__(self, module, cls, fnc, doc_type, link_name, link_url, user_name, hostname, mac_address):
self.clc_dt = datetime.datetime.today().strftime('%Y-%m-%d')
self.link_name, self.link_url = link_name, link_url
self.user_name, self.hostname, self.mac_address = user_name, hostname, mac_address
self.hash_id = AresSiphash.SipHash().hashId("%s_%s_%s_%s" % (module, cls, fnc, doc_type))
class AresDocParams(db.Model):
__bind_key__ = 'documentation'
__tablename__ = 'doc_params'
param_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
arc_id = db.Column(db.Integer, nullable=False)
param_type = db.Column(db.Text, nullable=False)
hash_id = db.Column(db.NUMERIC, nullable=False)
name = db.Column(db.Text, nullable=False)
dsc = db.Column(db.Text, nullable=False)
duck_type = db.Column(db.Text, nullable=False)
user_name = db.Column(db.String(120), nullable=False)
hostname = db.Column(db.String(120), nullable=False)
mac_address = db.Column(db.Integer, nullable=False)
clc_dt = db.Column(db.String(10), nullable=False)
lst_mod_dt = db.Column(db.DateTime, nullable=False, default=datetime.datetime.utcnow)
def __init__(self, module, cls, fnc, doc_type, param_name, param_dsc, param_duck_type, user_name,
hostname, mac_address):
self.clc_dt = datetime.datetime.today().strftime('%Y-%m-%d')
self.user_name, self.hostname, self.mac_address = user_name, hostname, mac_address
        self.name, self.dsc, self.duck_type = param_name, param_dsc, param_duck_type
self.hash_id = AresSiphash.SipHash().hashId("%s_%s_%s_%s" % (module, cls, fnc, doc_type))
class AresDocIO(db.Model):
__bind_key__ = 'documentation'
__tablename__ = 'doc_io'
io_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
cod = db.Column(db.Text, nullable=False)
dsc = db.Column(db.Text, nullable=False)
typ = db.Column(db.Text, nullable=False)
user_name = db.Column(db.String(120), nullable=False)
hostname = db.Column(db.String(120), nullable=False)
mac_address = db.Column(db.Integer, nullable=False)
clc_dt = db.Column(db.String(10), nullable=False)
lst_mod_dt = db.Column(db.DateTime, nullable=False, default=datetime.datetime.utcnow)
|
"""
timestamp is part of the AndroidGeodata project,
see the project page for more information https://github.com/robiame/AndroidGeodata.
timestamp is a class that collects methods to manage the conversion among the different date formats.
Copyright (C) 2016 Roberto Amelio
This file is part of AndroidGeodata.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import datetime
import time
class timestamp:
"""It contains useful methods"""
@staticmethod
def getTimestampFromString(value):
""" Converts from ISO 8601 to epoch timestamp.
ISO 8601 defines the international standard representation of dates and times.
Args:
value: date and time in ISO 8601 format
Returns:
None: error during the conversion
Timestamp
"""
try:
d = datetime.datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
        except (ValueError, TypeError):
return None
else:
return int(time.mktime(d.timetuple()))
@staticmethod
def getTimestampFromPicDatetime(value):
""" Converts from DateTime extracted from a pic to epoch timestamp.
Args:
value: date and time from pic
Returns:
Timestamp
"""
date = value.split(" ")[0].split(":")
time_var = value.split(" ")[1].split(":")
d = datetime.datetime(year=int(date[0]),month=int(date[1]),day=int(date[2]),hour=int(time_var[0]),minute=int(time_var[1]),second=int(time_var[2]))
return int(time.mktime(d.timetuple()))
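if __name__ == "__main__":
    # Minimal usage sketch (sample values are illustrative, not from the original project).
    print(timestamp.getTimestampFromString("2016-01-30T12:00:00Z"))      # ISO 8601
    print(timestamp.getTimestampFromPicDatetime("2016:01:30 12:00:00"))  # EXIF-style DateTime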
|
"""Abstract collection for UserContent and sub-classes of StaticSection, HiglassViewConfig, etc."""
from uuid import uuid4
from snovault import (
abstract_collection,
calculated_property,
collection,
load_schema
)
from snovault.interfaces import STORAGE
from .base import (
Item,
ALLOW_CURRENT,
DELETED,
ALLOW_LAB_SUBMITTER_EDIT,
ALLOW_VIEWING_GROUP_VIEW,
ONLY_ADMIN_VIEW,
ALLOW_OWNER_EDIT,
ALLOW_ANY_USER_ADD,
lab_award_attribution_embed_list
)
import os
import requests
@abstract_collection(
name='user-contents',
unique_key='user_content:name',
properties={
'title': "User Content Listing",
'description': 'Listing of all types of content which may be created by people.',
})
class UserContent(Item):
item_type = 'user_content'
base_types = ['UserContent'] + Item.base_types
schema = load_schema('encoded:schemas/user_content.json')
embedded_list = lab_award_attribution_embed_list
STATUS_ACL = { # Defaults + allow owner to edit (in case owner has no labs or submit_for)
'released' : ALLOW_OWNER_EDIT + ALLOW_CURRENT,
'deleted' : ALLOW_OWNER_EDIT + DELETED,
'draft' : ALLOW_OWNER_EDIT + ONLY_ADMIN_VIEW,
'released to lab' : ALLOW_OWNER_EDIT + ALLOW_LAB_SUBMITTER_EDIT,
'released to project' : ALLOW_OWNER_EDIT + ALLOW_VIEWING_GROUP_VIEW,
# 'archived' : ALLOW_OWNER_EDIT + ALLOW_CURRENT,
# 'archived to project' : ALLOW_OWNER_EDIT + ALLOW_VIEWING_GROUP_VIEW
}
@calculated_property(schema={
"title": "Content",
"description": "Content (unused)",
"type": "string"
})
def content(self, request):
return None
@calculated_property(schema={
"title": "File Type",
"description": "Type of this Item (unused)",
"type": "string"
})
def filetype(self, request):
return None
def _update(self, properties, sheets=None):
if properties.get('name') is None and self.uuid is not None:
properties['name'] = str(self.uuid)
super(UserContent, self)._update(properties, sheets)
@classmethod
def create(cls, registry, uuid, properties, sheets=None):
submitted_by_uuid = properties.get('submitted_by')
lab_schema = cls.schema and cls.schema.get('properties', {}).get('lab')
award_schema = cls.schema and cls.schema.get('properties', {}).get('award')
if (
not submitted_by_uuid # Shouldn't happen
or (not lab_schema and not award_schema) # If not applicable for Item type (shouldn't happen as props defined on UserContent schema)
or ('lab' in properties or 'award' in properties) # If values exist already - ideal case - occurs for general submission process(es)
):
# Default for all other Items
return super(UserContent, cls).create(registry, uuid, properties, sheets)
submitted_by_item = registry[STORAGE].get_by_uuid(submitted_by_uuid)
if submitted_by_item:
# All linkTo property values, if present, are UUIDs
if 'lab' not in properties and 'lab' in submitted_by_item.properties:
# Use lab of submitter - N.B. this differs from other Items where lab comes from 'submits_for' list.
properties['lab'] = submitted_by_item.properties['lab']
if 'award' not in properties and 'lab' in submitted_by_item.properties:
lab_item = registry[STORAGE].get_by_uuid(submitted_by_item.properties['lab'])
if lab_item and len(lab_item.properties.get('awards', [])) > 0:
# Using first award as default/fallback when award not explicitly selected/sent.
properties['award'] = lab_item.properties['awards'][0]
return super(UserContent, cls).create(registry, uuid, properties, sheets)
@collection(
name='static-sections',
unique_key='user_content:name',
properties={
'title': 'Static Sections',
'description': 'Static Sections for the Portal',
})
class StaticSection(UserContent):
"""The Software class that contains the software... used."""
item_type = 'static_section'
schema = load_schema('encoded:schemas/static_section.json')
@calculated_property(schema={
"title": "Content",
"description": "Content for the page",
"type": "string"
})
def content(self, request, body=None, file=None):
if isinstance(body, str) or isinstance(body, dict) or isinstance(body, list):
# Don't need to load in anything. We don't currently support dict/json body (via schema) but could in future.
return body
if isinstance(file, str):
if file[0:4] == 'http' and '://' in file[4:8]: # Remote File
return get_remote_file_contents(file)
else: # Local File
file_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../.." + file) # Go to top of repo, append file
return get_local_file_contents(file_path)
return None
@calculated_property(schema={
"title": "File Type",
"description": "Type of file used for content",
"type": "string"
})
def filetype(self, request, body=None, file=None, options=None):
if options and options.get('filetype') is not None:
return options['filetype']
if isinstance(body, str):
return 'txt'
if isinstance(body, dict) or isinstance(body, list):
return 'json'
if isinstance(file, str):
filename_parts = file.split('.')
if len(filename_parts) > 1:
return filename_parts[len(filename_parts) - 1]
else:
return 'txt' # Default if no file extension.
return None
@collection(
name='higlass-view-configs',
unique_key='user_content:name',
properties={
'title': 'HiGlass Displays',
'description': 'Displays and view configurations for HiGlass',
})
class HiglassViewConfig(UserContent):
"""
Item type which contains a `view_config` property and other metadata.
"""
item_type = 'higlass_view_config'
schema = load_schema('encoded:schemas/higlass_view_config.json')
#@calculated_property(schema={
# "title": "ViewConfig Files",
# "description": "List of files which are defined in ViewConfig",
# "type": "array",
# "linkTo" : "File"
#})
#def viewconfig_files(self, request):
# '''
# TODO: Calculate which files are defined in viewconfig, if any.
# '''
# return None
#@calculated_property(schema={
# "title": "ViewConfig Tileset UIDs",
# "description": "List of UIDs which are defined in ViewConfig",
# "type": "array",
# "items" : {
# "type" : "string"
# }
#})
#def viewconfig_tileset_uids(self, request):
# '''
# TODO: Calculate which tilesetUids are defined in viewconfig, if any.
# '''
# return None
@calculated_property(schema={
"title": "File Type",
"description": "Type of this Item (unused)",
"type": "string"
})
def filetype(self, request):
return "HiglassViewConfig"
class Collection(Item.Collection):
'''
This extension of the default Item collection allows any User to create a new version of these.
Emulates base.py Item collection setting of self.__acl__
TODO:
Eventually we can move this up to UserContent or replicate it on JupyterNotebook if want any
User to be able to create new one.
'''
def __init__(self, *args, **kw):
super(HiglassViewConfig.Collection, self).__init__(*args, **kw)
self.__acl__ = ALLOW_ANY_USER_ADD
@collection(
name='microscope-configurations',
properties={
'title': 'Microscope Configurations',
'description': 'Collection of Metadata for microscope configurations of various Tiers',
})
class MicroscopeConfiguration(UserContent):
"""The MicroscopeConfiguration class that holds configuration of a microscope."""
item_type = 'microscope_configuration'
schema = load_schema('encoded:schemas/microscope_configuration.json')
STATUS_ACL = {
'released' : ALLOW_CURRENT,
'deleted' : DELETED,
'draft' : ALLOW_OWNER_EDIT + ALLOW_LAB_SUBMITTER_EDIT,
'released to project' : ALLOW_VIEWING_GROUP_VIEW
}
def _update(self, properties, sheets=None):
if properties.get('microscope'):
microscope = properties.get('microscope')
# set microscope ID if empty
if not microscope.get('ID'):
microscope['ID'] = str(uuid4())
# always sync item's description to microscope's description
microscopeDesc = microscope.get('Description', '')
properties['description'] = microscopeDesc
super(MicroscopeConfiguration, self)._update(properties, sheets)
@calculated_property(schema={
"title": "Display Title",
"description": "A calculated title for every object in 4DN",
"type": "string"
})
def display_title(self, microscope, title=None):
return title or microscope.get("Name")
class Collection(Item.Collection):
'''
This extension of the default Item collection allows any User to create a new version of these.
Emulates base.py Item collection setting of self.__acl__
'''
def __init__(self, *args, **kw):
super(MicroscopeConfiguration.Collection, self).__init__(*args, **kw)
self.__acl__ = ALLOW_ANY_USER_ADD
@collection(
name='image-settings',
properties={
'title': 'Image Settings',
'description': 'Listing of ImageSetting Items.',
})
class ImageSetting(UserContent):
"""Image Settings class."""
item_type = 'image_setting'
schema = load_schema('encoded:schemas/image_setting.json')
STATUS_ACL = {
'released' : ALLOW_CURRENT,
'deleted' : DELETED,
'draft' : ALLOW_OWNER_EDIT + ALLOW_LAB_SUBMITTER_EDIT,
'released to project' : ALLOW_VIEWING_GROUP_VIEW
}
class Collection(Item.Collection):
'''
This extension of the default Item collection allows any User to create a new version of these.
Emulates base.py Item collection setting of self.__acl__
'''
def __init__(self, *args, **kw):
super(ImageSetting.Collection, self).__init__(*args, **kw)
self.__acl__ = ALLOW_ANY_USER_ADD
def get_local_file_contents(filename, contentFilesLocation=None):
if contentFilesLocation is None:
full_file_path = filename
else:
full_file_path = contentFilesLocation + '/' + filename
if not os.path.isfile(full_file_path):
return None
file = open(full_file_path, encoding="utf-8")
output = file.read()
file.close()
return output
def get_remote_file_contents(uri):
resp = requests.get(uri)
return resp.text
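# Illustrative note (not part of the original module): these helpers back the
# StaticSection.content calculated property above, e.g.
#   get_local_file_contents('/abs/path/to/section.html')
#   get_remote_file_contents('https://example.com/section.md')
# where the path and URL shown here are hypothetical.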
|
from ...frontend.entry import ScatteringEntry
class ScatteringEntry1D(ScatteringEntry):
def __init__(self, *args, **kwargs):
super().__init__(name='1D', class_name='scattering1d', *args, **kwargs)
__all__ = ['ScatteringEntry1D']
|
import pyttsx3 as pyttsx
class speech():
def __init__(self):
self.voice = 0
self.se = pyttsx.init('sapi5')
self.se.setProperty('rate', 200)
self.voices = self.se.getProperty('voices')
def speak(self, text, print_text=True):
if print_text:
print(text)
self.se.say(text)
self.se.runAndWait()
def change_voice(self):
self.voice = (self.voice+1)%2
self.se.setProperty('voice', self.voices[self.voice].id)
def set_voice(self, i):
self.voice = i
self.se.setProperty('voice', self.voices[i].id)
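if __name__ == "__main__":
    # Minimal usage sketch (assumes pyttsx3 with the Windows SAPI5 driver and at least
    # two installed voices; the phrases are illustrative).
    s = speech()
    s.speak("Hello, this is a test.")
    s.change_voice()
    s.speak("Now using the other installed voice.")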
|
import json
import hashlib
import pytest
@pytest.mark.skip('datasets')
@pytest.mark.models(
'backends/postgres/report/:dataset/test',
)
def test_push_same_model(model, app):
app.authmodel(model, ['insert'])
data = [
{'_op': 'insert', '_type': model, 'status': 'ok'},
{'_op': 'insert', '_type': model, 'status': 'warning'},
{'_op': 'insert', '_type': model, 'status': 'critical'},
{'_op': 'insert', '_type': model, 'status': 'blocker'},
]
headers = {'content-type': 'application/x-ndjson'}
payload = (json.dumps(x) + '\n' for x in data)
resp = app.post('/', headers=headers, data=payload)
resp = resp.json()
data = resp.pop('_data')
assert resp == {
'_transaction': resp['_transaction'],
'_status': 'ok',
}
assert len(data) == 4
assert data[0] == {
'_id': data[0]['_id'],
'_revision': data[0]['_revision'],
'_type': 'backends/postgres/report/:dataset/test',
'count': None,
'notes': [],
'operating_licenses': [],
'report_type': None,
'revision': None,
'status': 'ok',
'update_time': None,
'valid_from_date': None,
}
def sha1(s):
return hashlib.sha1(s.encode()).hexdigest()
@pytest.mark.skip('datasets')
def test_push_different_models(app):
app.authorize(['spinta_set_meta_fields'])
app.authmodel('country/:dataset/csv/:resource/countries', ['insert'])
app.authmodel('backends/postgres/report/:dataset/test', ['insert'])
data = [
{'_op': 'insert', '_type': 'country/:dataset/csv', '_id': sha1('lt'), 'code': 'lt'},
{'_op': 'insert', '_type': 'backends/postgres/report/:dataset/test', 'status': 'ok'},
]
headers = {'content-type': 'application/x-ndjson'}
payload = (json.dumps(x) + '\n' for x in data)
    resp = app.post('/', headers=headers, data=payload)
resp = resp.json()
assert '_data' in resp, resp
data = resp.pop('_data')
assert resp == {
'_transaction': resp.get('_transaction'),
'_status': 'ok',
}
assert len(data) == 2
d = data[0]
assert d == {
'_id': d['_id'],
'_revision': d['_revision'],
'_type': 'country/:dataset/csv/:resource/countries',
'code': 'lt',
'title': None,
}
d = data[1]
assert d == {
'_id': d['_id'],
'_revision': d['_revision'],
'_type': 'backends/postgres/report/:dataset/test',
'count': None,
'notes': [],
'operating_licenses': [],
'report_type': None,
'revision': None,
'status': 'ok',
'update_time': None,
'valid_from_date': None,
}
|
"""Contains functionality related to retrieving the content to check against
certain rules.
This module includes content providers, which are classes that know how to
look for information from Github, that is necessary for performing checks.
Ideally this should be abstracted so that it does not depend on Github,
but rather any Git service. For now this is tightly coupled with
the Github functionality.
"""
from functools import lru_cache
from typing import Type, Union
from github.PullRequest import PullRequest
from totem.checks.checks import (
TYPE_BRANCH_NAME,
TYPE_COMMIT_MESSAGE,
TYPE_PR_BODY_CHECKLIST,
TYPE_PR_BODY_EXCLUDES,
TYPE_PR_BODY_INCLUDES,
TYPE_PR_TITLE,
)
from totem.checks.content import (
BaseContentProvider,
BaseGitServiceContentProviderFactory,
)
from totem.checks.core import Check
from totem.github import github_service
from totem.reporting.pr import PRCommentReport
class GithubContentProvider(BaseContentProvider):
"""A base class for all content providers that use Github.
Provides some convenience functionality.
"""
def __init__(self, **params):
"""Constructor.
The caller can specify any number of custom parameters that are necessary
for retrieving the proper content.
"""
super().__init__(**params)
@lru_cache(maxsize=None)
def get_pr(self) -> PullRequest:
"""Return the pull request object.
:rtype: github.PullRequest.PullRequest
"""
return github_service().get_pr(self.repo_name, self.pr_number)
class GithubPRContentProvider(GithubContentProvider):
"""Retrieves information of a pull request from Github.
Contains all information that is necessary to perform related checks.
Makes only one request to the Github API.
    If a check object needs more information and that information is available
    without doing any extra request, it should be added here as new keys
    in the returned dictionary. If extra requests are necessary, a new content
provider subclass must be created, to avoid making redundant requests
for all PR-based content providers.
"""
@lru_cache(maxsize=None)
def get_content(self) -> dict:
"""Return a dictionary that contains various information about the PR."""
pr = self.get_pr()
return {'branch': pr.head.ref, 'title': pr.title, 'body': pr.body}
def create_pr_comment(self, body: str) -> dict:
"""Create a comment on a pull request.
:param str body: the body of the comment
:return: a dictionary with information about the created comment
:rtype: dict
"""
if self.repo_name is None:
return {}
if self.pr_number is None:
return {}
return github_service().create_pr_comment(self.repo_name, self.pr_number, body)
def delete_previous_pr_comment(self, latest_comment_id: int) -> bool:
"""Delete the previous totem comment on the PR.
Only one comment is deleted. `latest_comment_id` is given
to ensure that the newest comment is not deleted.
:param int latest_comment_id: the ID of the comment to leave intact
:return: True if the previous comment was deleted, False otherwise
:rtype: bool
"""
if self.repo_name is None:
return False
if self.pr_number is None:
return False
comments = github_service().get_pr_comments(self.repo_name, self.pr_number)
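# keep only earlier totem comments (recognised by the report title prefix), excluding the comment that was just posted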
comments = [
x
for x in comments
if x['body'].startswith(PRCommentReport.TITLE)
and x['id'] != latest_comment_id
]
comments = sorted(comments, key=lambda c: c['updated_at'])
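# delete only the most recent of the remaining, older comments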
if not comments:
return False
comment = comments[-1]
return github_service().delete_pr_comment(
self.repo_name, self.pr_number, comment['id']
)
class PRCommitsContentProvider(GithubContentProvider):
"""Retrieves information of all commits of a pull request from Github.
Contains all information that is necessary to perform related on commit
checks. Makes one request to the Github API for retrieving the PR info
(if not already cached) and another request for retrieving the commit info.
If a check object needs more information that is available without doing
any extra request, the information should be added here in new keys
in the returned dictionary. If extra requests are necessary, a new content
provider subclass must be created, to avoid making redundant requests
for all PR-based content providers.
"""
@lru_cache(maxsize=None)
def get_content(self) -> dict:
"""Return a dictionary that contains various information about the commits."""
commits = self.get_pr().get_commits()
return {
'commits': [
{
'message': commit.commit.message,
'sha': commit.sha,
'url': commit.html_url,
'stats': {
'additions': commit.stats.additions,
'deletions': commit.stats.deletions,
'total': commit.stats.total,
},
}
for commit in commits
]
}
class GithubContentProviderFactory(BaseGitServiceContentProviderFactory):
"""Responsible for creating the proper content provider for every type of check,
specifically for the Github service.
It is part of a mechanism for lazy retrieval of content from Github.
The factory cheaply creates provider objects that know how to get
that content, but the providers do not start fetching it immediately. Whoever
holds a provider object can ask it to retrieve the content, an operation
that may take time since it often requires HTTP requests to the remote service.
Allows clients to add custom functionality by registering new providers,
associated with certain configuration types.
"""
def create(self, check: Check) -> Union[BaseContentProvider, None]:
"""Return a content provider that can later provide all required content
for a certain check to execute its actions.
:param Check check: the check object to create a content provider for
:return: a content provider
:rtype: BaseContentProvider
"""
params = {'repo_name': self.repo_name, 'pr_num': self.pr_num}
cls: Union[Type[BaseContentProvider], None] = self._providers.get(check.check_type)
if cls is None:
return None
return cls(**params)
def _get_defaults(self) -> dict:
return {
TYPE_BRANCH_NAME: GithubPRContentProvider,
TYPE_PR_BODY_CHECKLIST: GithubPRContentProvider,
TYPE_PR_TITLE: GithubPRContentProvider,
TYPE_PR_BODY_EXCLUDES: GithubPRContentProvider,
TYPE_PR_BODY_INCLUDES: GithubPRContentProvider,
TYPE_COMMIT_MESSAGE: PRCommitsContentProvider,
}
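# A minimal sketch (not part of the original module) showing how a factory instance
# might be used once constructed; it relies only on the create() method above and the
# providers' get_content(). Constructing the factory itself (e.g. with the repository
# name and PR number referenced in create()) is outside this sketch.
def _example_factory_usage(factory: GithubContentProviderFactory, check: Check) -> dict:
    """Return the content for ``check``, or an empty dict when no provider is
    registered for its check type. Illustrative sketch only.
    """
    provider = factory.create(check)
    if provider is None:
        return {}
    return provider.get_content()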
|
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
sys.path.append(os.path.relpath("."))
from tools import parse_json_output, check_results
import warnings
warnings.simplefilter("ignore")
nb_runs = 3
algos = ["ucb_ds2", "ucb_ds", "ucb_d", "ucb"]
algos_names = ["UCB-MS2", "UCB-MS", "UCB-M", "UCB"] # "Distributed" becomes "Multi-party"
scenarios = ["Jester-small", "Jester-large", "MovieLens"]
DIR_OUT = "real-world-data-output/"
os.system("mkdir -p " + DIR_OUT)
N = 100000
aggregates_time = dict()
for algo in algos:
aggregates_time[algo] = list()
for scenario in scenarios:
input_file = "real-world-data/" + scenario + ".txt"
aggregates_all = dict()
R_list = dict()
for algo in algos:
print ("*" * 10 + " Scenario=", scenario, "N=", N, "algo=", algo)
output_file = DIR_OUT + "scenario" + scenario + "_N_" + str(N) + "_" + algo + ".txt"
#os.system("python3 " + algo + ".py " + str(nb_runs) + " " + str(N) + " " + input_file + " " + output_file + " 0")
R_list[algo], aggregate_time, aggregates_all[algo] = parse_json_output(output_file)
aggregates_time[algo].append(aggregate_time)
# check that all algorithms give the same cumulative reward
check_results(R_list, algos)
# plot bars
plt.figure(figsize=(14, 2.5))
plt.rcParams.update({'font.size':14})
x = np.arange(len(scenarios)) # the label locations
plt.xticks(x, scenarios)
width = 0.5 # the width of the bars
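# four bars per scenario, each width/3 wide, offset around the x tick (ucb leftmost, ucb_ds2 rightmost)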
plt.bar(x + 2*width/3, aggregates_time["ucb_ds2"], width/3, label='UCB-MS2', color='none', edgecolor='blue', hatch="--")
plt.bar(x + width/3, aggregates_time["ucb_ds"], width/3, label='UCB-MS', color='none', edgecolor='red', hatch="//")
plt.bar(x, aggregates_time["ucb_d"], width/3, label='UCB-M', color='none', edgecolor='green', hatch="||")
plt.bar(x - width/3, aggregates_time["ucb"], width/3, label='UCB', color='none', edgecolor='black')
plt.yscale('log')
plt.ylabel('Time (seconds)')
plt.legend(algos_names, bbox_to_anchor=(0.8,1.1), ncol=4)
plt.savefig(DIR_OUT + "plot_real_world_data.pdf")
|
"""GraphQL Schema."""
from ariadne import gql
TYPE_DEFS = gql("""
type Query {
map(name: String!): Map
maps: [Map]
civilization(id: Int!, dataset_id: Int!): Civilization
civilizations(dataset_id: Int!): [Civilization]
stats: Stats
match(id: Int!): Match
search: SearchResult
search_options: SearchOptions
event(id: String!): Event
events: [Event]
series(id: String!): Series
datasets: [Dataset]
platforms: [Platform]
meta_ladders(platform_id: String!, ladder_ids: [Int]): [Ladder]
user(id: String!, platform_id: String!): User
report(year: Int!, month: Int!, limit: Int = 25): Report
reports: [ReportOption]
person(id: Int!): Person
people: [Person]
latest: Latest
latest_summary: [LatestSummary]
}
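# Illustrative example (not part of the schema): a query a client might send,
# using only fields defined in this schema:
#   { match(id: 1) { map_name players { name civilization { name } } } }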
type Subscription {
stats: LiveStats
}
type LiveStats {
match_count: Int!
latest_summary: [LatestSummary]
}
type Latest {
matches(dataset_id: Int!, order: [String], offset: Int = 0, limit: Int = 10): Hits
}
type LatestSummary {
dataset: Dataset
version: String!
count: Int!
}
type SearchResult {
matches(params: Dict!, order: [String], offset: Int = 0, limit: Int = 10): Hits
}
type StatImprovement {
user: User
rank: Int!
min_rate: Int!
max_rate: Int!
diff_rate: Int!
count: Int!
wins: Int!
losses: Int!
}
type StatUser {
user: User
rank: Int!
change: Int
count: Int!
}
type StatMap {
map: Map
rank: Int!
count: Int!
percent: Float!
}
type Report {
total_matches: Int!
total_players: Int!
most_matches: [StatUser]
popular_maps: [StatMap]
longest_matches: [Match]
rankings(platform_id: String!, ladder_id: Int!, limit: Int = 25): [Rank]
most_improvement(platform_id: String!, ladder_id: Int!, limit: Int = 25): [StatImprovement]
}
type ReportOption {
year: Int!
month: Int!
}
type Ladder {
id: Int!
platform_id: String!
name: String!
ranks(limit: Int = 5): [Rank]
}
type Rank {
rank: Int!
rating: Int!
streak: Int
change: Int
platform_id: String!
ladder_id: Int!
ladder: Ladder
user: User
rate_by_day: [StatDate]
}
type Hits {
count: Int!
hits: [Match]
}
type Stats {
match_count: Int!
series_count: Int!
player_count: Int!
map_count: Int!
datasets: [StatItem]
platforms: [StatItem]
diplomacy: [StatItem]
languages: [StatItem]
types: [StatItem]
by_day: [StatDate]
}
type SearchOptions {
general: SearchOptionsGeneral
civilizations(dataset_id: Int!): [KeyValue]
versions(dataset_id: Int!): [KeyValue]
ladders(platform_id: String!): [KeyValue]
}
type SearchOptionsGeneral {
team_sizes: [KeyValue]
diplomacy_types: [KeyValue]
game_types: [KeyValue]
mirror: [KeyValue]
rated: [KeyValue]
rms_zr: [KeyValue]
playback: [KeyValue]
events: [KeyValue]
tournaments: [KeyValue]
winner: [KeyValue]
mvp: [KeyValue]
colors: [KeyValue]
datasets: [KeyValue]
platforms: [KeyValue]
civilizations: [KeyValue]
ladders: [KeyValue]
}
type KeyValue {
value: String!
label: String!
}
type StatItem {
name: String!
count: Int!
}
type StatDate {
date: Datetime
count: Int
}
type Map {
builtin: Boolean!
name: String!
count: Int!
percent: Float!
events: [Event]
preview_url: String
matches(order: [String], offset: Int = 0, limit: Int = 10): Hits
top_civilizations(limit: Int = 3): [Civilization]
}
type Civilization {
id: Int!
dataset_id: Int!
name: String!
count: Int!
percent: Float!
bonuses: [CivilizationBonus]
matches(order: [String], offset: Int = 0, limit: Int = 10): Hits
}
type CivilizationBonus {
type: String!
description: String!
}
type File {
id: Int!
match_id: Int!
original_filename: String!
size: Int!
language: String!
encoding: String!
owner: Player!
download_link: String!
}
type Match {
id: Int!
map_name: String!
rms_seed: Int
rms_custom: Int
guard_state: Boolean
fixed_positions: Boolean
direct_placement: Boolean
effect_quantity: Boolean
map_events: [Event]
duration: Datetime
duration_secs: Int
played: Datetime
added: Datetime
has_playback: Boolean
rated: Boolean
diplomacy_type: String
team_size: String
cheats: Boolean
population_limit: Int
lock_teams: Boolean
mirror: Boolean
dataset_version: String
version: String
game_version: String
save_version: String
build: String
postgame: Boolean
platform_match_id: String
winning_team_id: Int
players: [Player]
teams: [Team]
winning_team: Team
losing_team: Team
chat: [Chat]
files: [File]
difficulty: String!
type: String!
map_size: String!
map_reveal_choice: String!
speed: String!
starting_resources: String!
starting_age: String!
victory_condition: String!
dataset: Dataset
platform: Platform
ladder: Ladder
event: Event
tournament: Tournament
series: Series
minimap_link: String!
odds: Odds
graph: Graph
market: [MarketPrice]
tribute: [Tribute]
}
type Tribute {
timestamp: Datetime!
timestamp_secs: Int!
from_player: Player!
to_player: Player!
resource: String!
spent: Int!
received: Int!
fee: Int!
}
type MarketPrice {
timestamp: Datetime!
timestamp_secs: Int!
sell_food: Int!
sell_wood: Int!
sell_stone: Int!
buy_food: Int!
buy_wood: Int!
buy_stone: Int!
}
type TrainedCount {
player_number: Int!
object_id: Int!
timestamp_secs: Int!
timestamp: Datetime!
name: String!
count: Int!
}
type Graph {
nodes: [GraphNode]
links: [GraphLink]
}
type GraphNode {
id: Int!
name: String!
color_id: Int
}
type GraphLink {
source: Int!
target: Int!
}
type Odds {
teams: [StatOdds]
teams_and_civilizations: [StatOdds]
civilizations: [StatOdds]
civilizations_and_map: [StatOdds]
teams_and_map: [StatOdds]
}
type StatOdds {
wins: Int!
losses: Int!
percent: Float!
}
type Dataset {
id: Int!
name: String!
}
type Platform {
id: String!
name: String!
url: String
match_url: String
ladders: [Ladder]
}
type Player {
match_id: Int!
team_id: Int!
platform_id: String
user: User
number: Int!
name: String!
color: String!
color_id: Int!
winner: Boolean!
rate_snapshot: Float
rate_before: Float
rate_after: Float
mvp: Boolean
human: Boolean
score: Int
military_score: Int
economy_score: Int
technology_score: Int
society_score: Int
units_killed: Int
units_lost: Int
buildings_razed: Int
buildings_lost: Int
units_converted: Int
food_collected: Int
wood_collected: Int
stone_collected: Int
gold_collected: Int
tribute_sent: Int
tribute_received: Int
trade_gold: Int
relic_gold: Int
feudal_time: Datetime
castle_time: Datetime
imperial_time: Datetime
feudal_time_secs: Int
castle_time_secs: Int
imperial_time_secs: Int
explored_percent: Int
research_count: Int
total_wonders: Int
total_castles: Int
total_relics: Int
villager_high: Int
research: [Research]
civilization: Civilization
timeseries: [Timeseries]
apm: [APM]
map_control: [MapControl]
units_trained: [TrainedCount]
flags: [Flag]
metrics: Metrics
villager_allocation: [VillagerAllocation]
trade_carts: [ObjectCount]
villagers: [ObjectCount]
transactions: [Transaction]
}
type Transaction {
timestamp: Datetime!
timestamp_secs: Int!
sold_resource: String!
sold_amount: Int!
bought_resource: String!
bought_amount: Int
}
type ObjectCount {
timestamp: Datetime!
timestamp_secs: Int!
count: Int!
}
type MapControl {
timestamp: Datetime!
timestamp_secs: Int!
control_percent: Int!
}
type APM {
timestamp: Datetime!
timestamp_secs: Int!
actions: Int!
}
type VillagerAllocation {
timestamp: Datetime!
timestamp_secs: Int!
name: String!
count: Int!
}
type Metrics {
total_tcs: Int!
average_floating_resources: Int!
dark_age_tc_idle: Datetime!
seconds_housed: Datetime!
seconds_villagers_idle: Datetime
seconds_popcapped: Datetime!
}
type Flag {
type: String!
name: String!
count: Int
evidence: [Evidence]
}
type Evidence {
timestamp: Datetime!
value: String
}
type Timeseries {
timestamp: Datetime
timestamp_secs: Int!
total_food: Int!
total_wood: Int!
total_stone: Int!
total_gold: Int!
population: Float!
military: Float!
percent_explored: Float!
relic_gold: Int!
trade_profit: Int!
tribute_sent: Int!
tribute_received: Int!
value_current_buildings: Int!
value_current_units: Int!
value_lost_buildings: Int!
value_lost_units: Int!
value_objects_destroyed: Int!
value_spent_objects: Int!
value_spent_research: Int!
roi: Float!
damage: Float!
kills: Int!
deaths: Int!
razes: Int!
kd_delta: Int!
}
type Team {
match_id: Int!
team_id: Int!
winner: Boolean!
players: [Player]
}
type Research {
id: Int!
name: String!
started: Datetime!
started_secs: Int!
finished: Datetime
finished_secs: Int
}
type Chat {
player: Player!
message: String!
audience: String!
origination: String!
timestamp: Datetime!
}
type Event {
id: String!
year: Int!
name: String!
tournaments: [Tournament]
maps: [EventMap]
players: [EventPlayer]
civilizations: [EventCivilization]
}
type EventPlayer {
player: Player!
match_count: Int!
win_percent: Float!
average_duration: Datetime!
most_played_civilization: Civilization!
most_played_map: String!
}
type EventMap {
map: Map!
match_count: Int!
played_percent: Float!
average_duration: Datetime!
most_played_civilization: Civilization!
}
type EventCivilization {
civilization: Civilization!
match_count: Int!
win_percent: Float!
average_duration: Datetime!
most_played_map: String!
}
type Tournament {
id: String!
event_id: String!
name: String!
event: Event
series: [Series]
}
type Series {
id: String!
tournament_id: Int!
played: Datetime!
name: String!
sides: [Side]
tournament: Tournament
participants: [Participant]
match_ids: [Int!]
matches(order: [String], offset: Int = 0, limit: Int = 10): Hits
}
type Side {
series_id: Int!
name: String!
score: Int
winner: Boolean
users: [User]
}
type Participant {
series_id: Int!
name: String
score: Int
winner: Boolean
}
type User {
id: String!
platform_id: String!
platform: Platform
name: String!
person: Person
meta_ranks(ladder_ids: [Int]): [Rank]
matches(order: [String], offset: Int = 0, limit: Int = 10): Hits
top_map: Map
top_civilization: Civilization
top_dataset: Dataset
}
type Person {
id: Int!
country: String
name: String!
first_name: String
last_name: String
birthday: Datetime
age: Int
earnings: Float
esportsearnings_id: Int
aoeelo_id: Int
aoeelo_rank: Int
aoeelo_rate: Int
liquipedia: String
portrait_link: String
twitch: String
mixer: String
douyu: String
youtube: String
discord: String
match_count: Int!
first_year: Int!
last_year: Int!
aliases: [String]
accounts: [User]
events: [Event]
matches(order: [String], offset: Int = 0, limit: Int = 10): Hits
}
type Mutation {
upload(rec_file: Upload!): UploadResult!
}
type UploadResult {
success: Boolean!
message: String
match_id: Int
}
scalar Datetime
scalar Dict
scalar Upload
""")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module to provide abstract Empower socket
"""
import socket
import argparse
import logging
import logging.config
import yaml
import coloredlogs
__author__ = 'Giuseppe Cogoni, Brent Maranzano'
__license__ = 'MIT'
class Empower(object):
""" Mock Waters' Patrol socket API interface.
"""
def __init__(self):
""" Start the logger, start listening on a socket.
"""
self._setup_logger()
def _setup_logger(self, config_file='./logger_conf.yml'):
""" Start the logger using the provided configuration file.
This method is used internally only; it is called from the ``__init__`` method.
Arguments:
config_file (yml file): Configuration file for logger.
"""
try:
with open(config_file, 'rt') as file_obj:
config = yaml.safe_load(file_obj.read())
logging.config.dictConfig(config)
coloredlogs.install(level='DEBUG')
except Exception as e:
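# if the logger configuration cannot be loaded, report the error and continue with whatever logging defaults are in place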
print(e)
self._logger = logging.getLogger('empower_logger')
self._logger.debug('Empower logger setup.')
def run(self, socket_host=None, socket_port=None):
""" Create a very simple echo socket server.
Arguments:
socket_host (str): IP address or host name of host, e.g.: '0.0.0.0'.
socket_port (int): Port number for socket server to listen, e.g.: 54756.
"""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((socket_host, socket_port))
s.listen()
self._logger.debug('Socket server listening on {}:{}'.format(socket_host,
socket_port))
conn, addr = s.accept()
with conn:
self._logger.debug('Connected by: {}'.format(addr))
while True:
data = conn.recv(1024)
self._logger.debug('Empower socket received: {}'.format(
data.decode(encoding='UTF-8')))
if not data:
break
conn.sendall(data)
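# A minimal sketch (not part of the original module) of a client exercising the echo
# server above; host, port and message are illustrative assumptions (the port matches
# the script's default below).
def example_client(host='127.0.0.1', port=54756, message=b'hello'):
    """Connect to the mock Empower socket, send ``message`` and return the echo."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:
        client.connect((host, port))
        client.sendall(message)
        return client.recv(1024)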
# Main routine
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='mock Empower socket server')
parser.add_argument(
'--socket_ip',
help='host address for the socket to bind',
type=str,
default='0.0.0.0'
)
parser.add_argument(
'--socket_port',
help='port number for the socket server',
type=int,
default=54756
)
args = parser.parse_args()
empower = Empower()
empower.run(socket_host=args.socket_ip, socket_port=args.socket_port)
|