"""
This file defines the L-Op and R-Op, based on https://j-towns.github.io/2017/06/12/A-new-trick.html
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def Lop(nodes, x, v):
lop_out = tf.gradients(nodes, x, grad_ys=v)
return lop_out
def Rop(nodes, x, v):
if isinstance(nodes, list):
u = [tf.ones_like(node) for node in nodes]
else:
u = tf.ones_like(nodes)
rop_out = tf.gradients(
Lop(nodes, x, u),u,grad_ys=v)
return rop_out
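# Minimal usage sketch (added for illustration, assuming TensorFlow 1.x graph mode):
# for y = x ** 2 the R-op is the Jacobian-vector product J(x) v = 2 * x * v.
if __name__ == "__main__":
    x = tf.constant([1.0, 2.0, 3.0])
    v = tf.constant([1.0, 1.0, 1.0])
    y = x ** 2
    jvp = Rop(y, [x], [v])  # expected value: [2., 4., 6.]
    with tf.Session() as sess:
        print(sess.run(jvp))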
|
from django.apps import AppConfig
class ChoresConfig(AppConfig):
name = 'chores'
|
"""Compute depth maps for images in the input folder.
"""
import os
import glob
import torch
import utils
import cv2
import sys
import threading
import serial
from sys import platform
import argparse
import matplotlib.pyplot as plt
import numpy as np
from torchvision.transforms import Compose
from midas.dpt_depth import DPTDepthModel
from midas.midas_net import MidasNet
from midas.midas_net_custom import MidasNet_small
from midas.transforms import Resize, NormalizeImage, PrepareForNet
import math
from cameraToWorld import CtoWorld
from numpy.linalg import solve
import time
ser = serial.Serial('COM9', 9600)
# angle between the camera and true north
angle = 345
ar_min = 135
ar_max = 180
radius = 500.0
mequipment = {"TV":[200,0.0,0.0,0.0]}
radius = {"TV": 500, "AIR": 200, "AUDIO": 200, "FAN": 200, "SWEEPER": 200 }
firstnode = [0,0]
def run(img_name, output_path, model_type, model, optimize=True):
"""Run MonoDepthNN to compute depth maps.
Args:
img_name: catch picture
output_path (str): path to output folder
model_path (str): path to saved model
"""
print("initialize")
# select device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("device: %s" % device)
# load network
if model_type == "dpt_large": # DPT-Large
net_w, net_h = 384, 384
resize_mode = "minimal"
normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
elif model_type == "dpt_hybrid": #DPT-Hybrid
net_w, net_h = 384, 384
resize_mode="minimal"
normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
elif model_type == "midas_v21":
net_w, net_h = 384, 384
resize_mode="upper_bound"
normalization = NormalizeImage(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
elif model_type == "midas_v21_small":
net_w, net_h = 256, 256
resize_mode="upper_bound"
normalization = NormalizeImage(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
else:
print(f"model_type '{model_type}' not implemented, use: --model_type large")
assert False
transform = Compose(
[
Resize(
net_w,
net_h,
resize_target=None,
keep_aspect_ratio=True,
ensure_multiple_of=32,
resize_method=resize_mode,
image_interpolation_method=cv2.INTER_CUBIC,
),
normalization,
PrepareForNet(),
]
)
model.eval()
if optimize==True:
# rand_example = torch.rand(1, 3, net_h, net_w)
# model(rand_example)
# traced_script_module = torch.jit.trace(model, rand_example)
# model = traced_script_module
if device == torch.device("cuda"):
model = model.to(memory_format=torch.channels_last)
model = model.half()
model.to(device)
# create output folder
os.makedirs(output_path, exist_ok=True)
print("start processing")
# input
img = utils.read_image(img_name)
img_input = transform({"image": img})["image"]
# compute
with torch.no_grad():
sample = torch.from_numpy(img_input).to(device).unsqueeze(0)
if optimize==True and device == torch.device("cuda"):
sample = sample.to(memory_format=torch.channels_last)
sample = sample.half()
prediction = model.forward(sample)
prediction = (
torch.nn.functional.interpolate(
prediction.unsqueeze(1),
size=img.shape[:2],
mode="bicubic",
align_corners=False,
)
.squeeze()
.cpu()
.numpy()
)
# output
filename = os.path.join(
output_path, "result"
)
# cv2.namedWindow('imagedepth', cv2.WINDOW_NORMAL)
# cv2.imshow('image',prediction)
# cv2.waitKey(0)
mdepth = utils.write_depth(filename, prediction, bits=2)
print("finished")
return mdepth
def processOpenpose(image,op):
# Custom Params (refer to include/openpose/flags.hpp for more parameters)
params = dict()
params["model_folder"] = "../../../models/"
# Construct it from system arguments
# op.init_argv(args[1])
# oppython = op.OpenposePython()
# Add others in path?
params["net_resolution"] = "320x176"
# Starting OpenPose
opWrapper = op.WrapperPython()
opWrapper.configure(params)
opWrapper.start()
imageToProcess = image
# Process Image
datum = op.Datum()
# imageToProcess = cv2.imread(img)
datum.cvInputData = imageToProcess
opWrapper.emplaceAndPop(op.VectorDatum([datum]))
# Display Image
# print("Body keypoints: \n" + str(datum.poseKeypoints))
# cv2.imshow("OpenPose 1.7.0 - Tutorial Python API", datum.cvOutputData)
# cv2.waitKey(0)
print("keypointtype",type(datum.cvOutputData))
cv2.imwrite("keypoint.jpg",datum.cvOutputData) # 保存路径
return datum.poseKeypoints
# # image size is 320x426
# tink = np.ones((426,320),dtype='float64')
# tink = tink
# print(tink.shape)
# for i in range(datum.poseKeypoints.shape[0]):
# for j in range(datum.poseKeypoints.shape[1]):
# x = datum.poseKeypoints[i][j][0]
# y = datum.poseKeypoints[i][j][1]
# if y>426 or x>320:
# continue
# score = datum.poseKeypoints[i][j][2]
# #color = score
# color = 1
# print("x,y",int(y),int(x))
# tink[int(y)][int(x)] = 240 * color / 25
# tink[int(y)+1][int(x)] = 240 * color / 25
# tink[int(y)][int(x)+1] = 240 * color / 25
# tink[int(y)-1][int(x)] = 240 * color / 25
# tink[int(y)][int(x)-1] = 240 * color / 25
# tink[int(y) + 1][int(x)+1] = 240 * color / 25
# tink[int(y)-1][int(x) + 1] = 240 * color / 25
# tink[int(y) - 1][int(x)-1] = 240 * color / 25
# tink[int(y) + 1][int(x) - 1] = 240 * color / 25
# plt.imshow(tink,cmap="gray")
# plt.axis('off')
# plt.show()
def isSend(l_head,l_mid,l_tail,r_head,r_mid,r_tail,label):
"""Three points on one line
label = 0:失效 1:只有右手 2:只有左手 3:两只手
Args:
Keypoints :Three points
"""
invalid = np.array([])
if label == 0:
return invalid
elif label == 1:
# right hand
print("head tail",r_head[1],r_tail[1])
a_2 =(r_head[0] - r_mid[0])**2 + (r_head[1] - r_mid[1])**2 + (r_head[2] - r_mid[2])**2
b_2 =(r_tail[0] - r_mid[0])**2 + (r_tail[1] - r_mid[1])**2 + (r_tail[2] - r_mid[2])**2
c_2 =(r_head[0] - r_tail[0])**2 + (r_head[1] - r_tail[1])**2 + (r_head[2] - r_tail[2])**2
r_angle = math.degrees(math.acos((a_2 + b_2 - c_2)/(2 * math.sqrt(a_2) * math.sqrt(b_2))))
print("rangle",r_angle)
if ar_min < r_angle < ar_max:
return r_head
else:
return invalid
elif label == 2:
#left hand
print("head tail",l_head[1],l_tail[1])
a_2 =(l_head[0] - l_mid[0])**2 + (l_head[1] - l_mid[1])**2 + (l_head[2] - l_mid[2])**2
b_2 =(l_tail[0] - l_mid[0])**2 + (l_tail[1] - l_mid[1])**2 + (l_tail[2] - l_mid[2])**2
c_2 =(l_head[0] - l_tail[0])**2 + (l_head[1] - l_tail[1])**2 + (l_head[2] - l_tail[2])**2
l_angle = math.degrees(math.acos((a_2 + b_2 - c_2)/(2 * math.sqrt(a_2) * math.sqrt(b_2))))
print("langle",l_angle)
if ar_min < l_angle < ar_max:
return l_head
else:
return invalid
elif label == 3:
#left hand
print("head tail",l_head[1],l_tail[1])
a_2 =(l_head[0] - l_mid[0])**2 + (l_head[1] - l_mid[1])**2 + (l_head[2] - l_mid[2])**2
b_2 =(l_tail[0] - l_mid[0])**2 + (l_tail[1] - l_mid[1])**2 + (l_tail[2] - l_mid[2])**2
c_2 =(l_head[0] - l_tail[0])**2 + (l_head[1] - l_tail[1])**2 + (l_head[2] - l_tail[2])**2
l_angle = math.degrees(math.acos((a_2 + b_2 - c_2)/(2 * math.sqrt(a_2) * math.sqrt(b_2))))
print("langle",l_angle)
# right hand
print("head tail",r_head[1],r_tail[1])
a_2 =(r_head[0] - r_mid[0])**2 + (r_head[1] - r_mid[1])**2 + (r_head[2] - r_mid[2])**2
b_2 =(r_tail[0] - r_mid[0])**2 + (r_tail[1] - r_mid[1])**2 + (r_tail[2] - r_mid[2])**2
c_2 =(r_head[0] - r_tail[0])**2 + (r_head[1] - r_tail[1])**2 + (r_head[2] - r_tail[2])**2
r_angle = math.degrees(math.acos((a_2 + b_2 - c_2)/(2 * math.sqrt(a_2) * math.sqrt(b_2))))
print("rangle",r_angle)
if ar_min < l_angle < ar_max and ar_min < r_angle < ar_max:
if l_head[2] > r_head[2]:
return l_head
else:
return r_head
elif ar_min < l_angle < ar_max and r_angle <= ar_min:
return l_head
elif l_angle <= ar_min and ar_min < r_angle < ar_max:
return r_head
else:
return invalid
def gtDepth(depth):
# a = -2.13798
# b = 3622.8536
a = -0.43476123
b = 1647.1930877842856
return a * depth + b
def target_not(unot,uvector):
# need to know which plane the ray hits
# here it is the y = 0 plane
tx = uvector[0] * (-unot[1]) / uvector[1] + unot[0]
tz = uvector[2] * (-unot[1]) / uvector[1] + unot[2]
return tx,tz
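# Comment added for clarity: target_not intersects the ray unot + t * uvector with the y = 0 plane,
# i.e. t = -unot[1] / uvector[1], giving tx = unot[0] + t * uvector[0] and tz = unot[2] + t * uvector[2].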
def distance(value,points,vector):
P1 = np.array([value[1],value[2],value[3]])
P2 = np.array(points).reshape(1,-1)
# vectors A and B share the same tail point
A = P1 - P2
B = np.array(vector)
# compute the cross product
A_B = np.cross(A, B)
# compute the norm of the cross product
AB_mo = np.linalg.norm(A_B)
B_mo = np.linalg.norm(B)
dist = AB_mo / B_mo
return dist
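# Worked example (illustrative only): for value = [500, 0.0, 1.0, 0.0] (target point (0, 1, 0))
# and a pointing ray through points = [0, 0, 0] with vector = [1, 0, 0], the point-to-line
# distance |(P1 - P2) x B| / |B| evaluates to 1.0.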
def get_eq(name):
# map the equipment name to a numeric command id
if name == "TV":
return 0
elif name == "AIR":
return 1
elif name == "AUDIO":
return 2
elif name == "FAN":
return 3
elif name == "SWEEPER":
return 4
def destination_calibration(points,vector,model,equipment):
# intersection (closest point) of the two pointing rays?
if model == "calibration":
# store the first ray (origin point and direction)
firstnode[0] = points
firstnode[1] = vector
return "a"
elif model == "calibration2":
A = np.array(firstnode[1]).reshape(1,-1)
B = np.array(vector).reshape(1,-1)
P1 = np.array(firstnode[0]).reshape(1,-1)
P2 = np.array(points).reshape(1,-1)
N = np.cross(A, B).reshape(1,-1)
# dest = np.linalg.norm(np.dot(N,P2 - P1)) / np.linalg.norm(N)
a=np.mat([[B[0][0] * N[0][1] - B[0][1] * N[0][0],N[0][0] * A[0][1] - N[0][1] * A[0][0]],[B[0][0] * N[0][2] - B[0][2] * N[0][0], N[0][0] * A[0][2] - N[0][2] * A[0][0]]]) # coefficient matrix
b=np.mat([N[0][0] * P2[0][1] - P1[0][1] * N[0][0] - P2[0][0] * N[0][1] + P1[0][0] * N[0][1],N[0][0] * P2[0][2] - P1[0][2] * N[0][0] - P2[0][0] * N[0][2] + P1[0][0] * N[0][2]]).T
# m=B,P2 ,t=A,P1
x = np.array(solve(a,b)).reshape(1,-1) # solution of the linear system
mequipment[equipment] = [radius[equipment],(x[0][0] * B[0][0] + P2[0][0] + x[0][1] * A[0][0] + P1[0][0]) / 2,(x[0][0] * B[0][1] + P2[0][1] + x[0][1] * A[0][1] + P1[0][1]) / 2,(x[0][0] * B[0][2] + P2[0][2] + x[0][1] * A[0][2] + P1[0][2]) / 2]
print("input",equipment,mequipment[equipment])
return "b"
else:
return "c"
def calculate(poseKeypoints,imageDepth,c_to_w,vector,model,equipment):
global out
for i in range(poseKeypoints.shape[0]): # people
left = [7,6,5]
leftKeypoints = []
right = [4,3,2]
left_complete = True
right_complete = True
for j in left:
x = poseKeypoints[i][j][0]
y = poseKeypoints[i][j][1]
if x == 0.0 or y == 0.0:
left_complete = False
leftKeypoints.append([x,y])
# print("left",leftKeypoints)
# print(leftKeypoints[1][0])
rightKeypoints = []
for j in right:
x = poseKeypoints[i][j][0]
y = poseKeypoints[i][j][1]
if x == 0.0 or y == 0.0:
right_complete = False
rightKeypoints.append([x,y])
# print(rightKeypoints)
# print(rightKeypoints[1][0])
# print("pose_and_depth_type",poseKeypoints.shape, imageDepth.shape)
# x, y are in image coordinates, but the depth map is an array indexed row-first (y) then column (x)
# right hand
hand_not_x, hand_not_y = int(poseKeypoints[i][4][0]),int(poseKeypoints[i][4][1])
print("2",hand_not_x,hand_not_y)
print("depth",gtDepth(imageDepth[hand_not_y][hand_not_x]),imageDepth[hand_not_y][hand_not_x])
r_head = c_to_w.c_w(c_to_w.pixel_c([hand_not_x,hand_not_y,1],gtDepth(imageDepth[hand_not_y][hand_not_x])))
hand_not_x, hand_not_y = int(poseKeypoints[i][3][0]),int(poseKeypoints[i][3][1])
print("3",hand_not_x,hand_not_y)
r_mid = c_to_w.c_w(c_to_w.pixel_c([hand_not_x,hand_not_y,1],gtDepth(imageDepth[hand_not_y][hand_not_x])))
hand_not_x, hand_not_y = int(poseKeypoints[i][2][0]),int(poseKeypoints[i][2][1])
print("4",hand_not_x,hand_not_y)
r_tail = c_to_w.c_w(c_to_w.pixel_c([hand_not_x,hand_not_y,1],gtDepth(imageDepth[hand_not_y][hand_not_x])))
# left hand
hand_not_x, hand_not_y = int(poseKeypoints[i][7][0]),int(poseKeypoints[i][7][1])
print("2",hand_not_x,hand_not_y)
l_head = c_to_w.c_w(c_to_w.pixel_c([hand_not_x,hand_not_y,1],gtDepth(imageDepth[hand_not_y][hand_not_x])))
hand_not_x, hand_not_y = int(poseKeypoints[i][6][0]),int(poseKeypoints[i][6][1])
print("3",hand_not_x,hand_not_y)
l_mid = c_to_w.c_w(c_to_w.pixel_c([hand_not_x,hand_not_y,1],gtDepth(imageDepth[hand_not_y][hand_not_x])))
hand_not_x, hand_not_y = int(poseKeypoints[i][5][0]),int(poseKeypoints[i][5][1])
print("4",hand_not_x,hand_not_y)
l_tail = c_to_w.c_w(c_to_w.pixel_c([hand_not_x,hand_not_y,1],gtDepth(imageDepth[hand_not_y][hand_not_x])))
# all hand/arm keypoints must be visible
label = 0
if right_complete == True:
label = label + 1
if left_complete == True:
label = label + 2
print("label",label)
# label = 0: invalid, 1: right hand only, 2: left hand only, 3: both hands
w_points = isSend(l_head,l_mid,l_tail,r_head,r_mid,r_tail,label)
# IMU
if w_points.size != 0:
if model == "calibration" or model == "calibration2":
ca_message = destination_calibration(w_points,vector,model,equipment)
for j in range(2):
ser.write(str(ca_message).encode("gbk"))
elif model == "order":
for key,value in mequipment.items():
dis = distance(value,w_points,vector)
print("dis",dis)
if dis < value[0]:
eq = get_eq(key)
print("equipment",eq)
for j in range(2):
ser.write(str(eq).encode("gbk"))
break
else:
# invalid
pass
out = ''
def reads():
""" 读取数据 """
global out
while True:
if out == '':
while ser.inWaiting() > 0:
out += ser.read(1).decode() # read one byte at a time
if 0xFF == ord('q'):
break
def imu_get(str):
mess = str.split()
print("model",len(mess))
if len(mess) == 3:
model = "calibration"
z = float(mess[1])
x = float(mess[2])
equip = mess[0]
print("imu",z,x)
elif len(mess) == 2:
model = "order"
z = float(mess[0])
x = float(mess[1])
print("imu",z,x)
equip = ""
elif len(mess) == 4:
model = "calibration2"
z = float(mess[1])
x = float(mess[2])
equip = mess[0]
print("imu",z,x)
# unit vector
# angle between the camera and north
c_to_n = angle
# compute the angles
# west is taken as negative
# theta is measured from -y, clockwise positive, counter-clockwise negative: c_to_n - (-z)
xita = (c_to_n - z + 270) % 360
fai = x + 90
print("fai",fai,xita)
# direction unit vector
uz = math.cos(math.radians(fai))
print("uz",uz)
uy = math.sin(math.radians(xita)) * math.sin(math.radians(fai))
ux = math.cos(math.radians(xita)) * math.sin(math.radians(fai))
vec = [ux,uy,uz]
print("vtype",vec)
return vec,model,equip
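# Sanity check (illustrative only): (ux, uy, uz) = (cos(theta)sin(phi), sin(theta)sin(phi), cos(phi))
# is a spherical-to-Cartesian conversion, so the returned direction vector always has unit length:
#   vec, model, equip = imu_get("0 0")          # "order" mode with z = 0, x = 0
#   assert abs(np.linalg.norm(vec) - 1) < 1e-9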
def load_model(model_type,model_path):
# load network
if model_type == "dpt_large": # DPT-Large
model = DPTDepthModel(
path=model_path,
backbone="vitl16_384",
non_negative=True,
)
elif model_type == "dpt_hybrid": #DPT-Hybrid
model = DPTDepthModel(
path=model_path,
backbone="vitb_rn50_384",
non_negative=True,
)
elif model_type == "midas_v21":
model = MidasNet(model_path, non_negative=True)
elif model_type == "midas_v21_small":
model = MidasNet_small(model_path, features=64, backbone="efficientnet_lite3", exportable=True, non_negative=True, blocks={'expand': True})
else:
print(f"model_type '{model_type}' not implemented, use: --model_type large")
assert False
return model
def camera():
global out
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW) # open the camera
mctoworld = CtoWorld() # create the camera-to-world calibration object
model_ = load_model(args.model_type,args.model_weights)
while (1):
# get a frame
ret, frame = cap.read()
# frame = cv2.flip(frame, 1) # the camera faces the user; flip the image horizontally so it displays normally
# show a frame
cv2.imshow("capture", frame) # 生成摄像头窗口
if cv2.waitKey(1) and out != '': # 如果按下q 就截图保存并退出
print("okkkk")
print(frame.shape)
x, y = frame.shape[0:2]
# print("x.y",x,y)
imgecroped = cv2.resize(frame, (int(y/4), int(x/4)))
print(imgecroped.shape)
cv2.imwrite("test.jpg", imgecroped) # 保存路径
cv2.destroyAllWindows()
mve,order_model,equipment_c = imu_get(out)
# process openpose
start = time.time()
poseKeypoints = processOpenpose(imgecroped,op)
end = time.time()
print("openpose",end - start)
start = time.time()
# # compute depth maps
imageDepth = run(imgecroped, args.output_path, args.model_type, model_, args.optimize)
end = time.time()
print("depth",end - start)
calculate(poseKeypoints,imageDepth,mctoworld,mve,order_model,equipment_c)
# out = ''
# break
cap.release()
if __name__ == "__main__":
try:
# Import Openpose (Windows/Ubuntu/OSX)
dir_path = os.path.dirname(os.path.realpath(__file__))
try:
# Windows Import
if platform == "win32":
# Change these variables to point to the correct folder (Release/x64 etc.)
sys.path.append(dir_path + '/../../python/openpose/Release')
os.environ['PATH'] = os.environ['PATH'] + ';' + dir_path + '/../../x64/Release;' + dir_path + '/../../bin;'
import pyopenpose as op
else:
# Change these variables to point to the correct folder (Release/x64 etc.)
sys.path.append('../../python')
# If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.
# sys.path.append('/usr/local/python')
from openpose import pyopenpose as op
except ImportError as e:
print('Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?')
raise e
except Exception as e:
print(e)
sys.exit(-1)
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_path',
default='input',
help='folder with input images'
)
parser.add_argument('-o', '--output_path',
default='output',
help='folder for output images'
)
parser.add_argument('-m', '--model_weights',
default=None,
help='path to the trained weights of model'
)
parser.add_argument('-t', '--model_type',
default='dpt_hybrid',
help='model type: dpt_large, dpt_hybrid, midas_v21 or midas_v21_small'
)
parser.add_argument('-n', '--net_resolution',
default='240x160',
help='size of image'
)
parser.add_argument('--optimize', dest='optimize', action='store_true')
parser.add_argument('--no-optimize', dest='optimize', action='store_false')
parser.set_defaults(optimize=True)
args = parser.parse_args()
print("canshu",args)
# args = parser.parse_known_args()
# # Custom Params (refer to include/openpose/flags.hpp for more parameters)
# params = dict()
# params["model_folder"] = "../../../models/"
# # Add others in path?
# for i in range(0, len(args[1])):
# curr_item = args[1][i]
# if i != len(args[1])-1: next_item = args[1][i+1]
# else: next_item = "1"
# if "--" in curr_item and "--" in next_item:
# key = curr_item.replace('-','')
# if key not in params: params[key] = "1"
# elif "--" in curr_item and "--" not in next_item:
# key = curr_item.replace('-','')
# if key not in params: params[key] = next_item
default_models = {
"midas_v21_small": "weights/midas_v21_small-70d6b9c8.pt",
"midas_v21": "weights/midas_v21-f6b98070.pt",
"dpt_large": "weights/dpt_large-midas-2f21e586.pt",
"dpt_hybrid": "weights/dpt_hybrid-midas-501f0c75.pt",
}
if args.model_weights is None:
args.model_weights = default_models[args.model_type]
# set torch options
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
out = ''
t1 = threading.Thread(target=reads, name='reads')
t2 = threading.Thread(target=camera, name='camera')
t1.start()
t2.start()
|
from dateutil.tz import UTC
from dateutil.tz import gettz
from yui.apps.info.memo.models import Memo
from yui.utils.datetime import now
def test_memo_model(fx_sess):
now_dt = now()
record = Memo()
record.keyword = 'test'
record.text = 'test test'
record.author = 'U1'
record.created_at = now_dt
with fx_sess.begin():
fx_sess.add(record)
assert record.created_at == now_dt
assert record.created_datetime == now_dt.astimezone(UTC)
assert record.created_timezone == gettz('Asia/Seoul')
|
# -*- coding: utf-8 -*-
from django.db import models
from apps.registro.models.ExtensionAulica import ExtensionAulica
from apps.seguridad.audit import audit
@audit
class ExtensionAulicaMatricula(models.Model):
extension_aulica = models.ForeignKey(ExtensionAulica)
anio = models.IntegerField()
mixto = models.BooleanField()
profesorados = models.PositiveIntegerField(null=True, blank=True)
postitulos = models.PositiveIntegerField(null=True, blank=True)
formacion_docente = models.PositiveIntegerField(null=True, blank=True)
formacion_continua = models.PositiveIntegerField(null=True, blank=True)
tecnicaturas = models.PositiveIntegerField(null=True, blank=True)
total = models.PositiveIntegerField(null=True, blank=True)
class Meta:
app_label = 'registro'
unique_together = ('extension_aulica', 'anio')
db_table = 'registro_extension_aulica_matricula'
def get_formacion_docente(self):
# Teacher training (formacion_docente) = teaching degrees only + postgraduate diplomas only + continuing education
tmp = ((self.profesorados or 0) + (self.postitulos or 0)) + (self.formacion_continua or 0)
if tmp <= 0:
return None
return tmp
def get_formacion_continua(self):
# Continuing education (formacion_continua) = total - technical degrees only - teaching degrees only - postgraduate diplomas only
tmp = ((self.total or 0) - (self.tecnicaturas or 0)) - (self.profesorados or 0) - (self.postitulos or 0)
if tmp <= 0:
return None
return tmp
def set_formacion_docente(self):
self.formacion_docente = self.get_formacion_docente()
def set_formacion_continua(self):
self.formacion_continua = self.get_formacion_continua()
|
import unittest
from test.helpers import get_general_wrapper
class Rule1Test(unittest.TestCase):
def test_rule1_negative(self):
context = """
LABEL maintainer="foo <foo@bar.com>"
# A Comment
FROM registry.a.com/acme/centos:7
"""
wrapper = get_general_wrapper(context)
wrapper.rule1()
self.assertEqual(len(wrapper.errors), 1)
self.assertIn("""FROM must be the first instruction""", wrapper.errors[0])
def test_rule1_positive(self):
context = """
# A Comment
FROM registry.a.com/acme/centos:7
"""
wrapper = get_general_wrapper(context)
wrapper.rule1()
self.assertEqual(len(wrapper.errors), 0)
def test_rule1_with_preceding_arg(self):
context = """
ARG release=something
FROM registry.a.com/acme/centos:${release}
"""
wrapper = get_general_wrapper(context)
wrapper.rule1()
self.assertEqual(len(wrapper.errors), 0)
|
#!/usr/bin/env python
import Align
import sys
import pdb
inFile = open(sys.argv[1])
nMatch = 0
nTest = 0
for line in inFile.readlines():
vals = line.split()
seqs = vals[6].split(';')
if (len(seqs) == 2):
(qopt, topt, score) = Align.SWAlign(seqs[0], seqs[1], indel=0,mismatch=0)
ident = float(score) / max(len(seqs[0]), len(seqs[1]))
if (ident > 0.70):
print line
nMatch += 1
nTest +=1
if (nTest % 1000 == 0):
sys.stderr.write(str((nTest, nMatch)) + "\n")
else:
print line
# print str((seqs[0], seqs[1], score))
# print "{} {} {}".format(
|
VERSION="20200103"
|
from utils.discord import help_me, DiscordInteractive
from utils.osu.utils import CalculateMods
from utils.utils import Log
interact = DiscordInteractive.interact
class Command:
command = "ar"
description = "Calculate Approach Rate values and milliseconds with mods applied."
argsRequired = 1
usage = "<ar> [+mods]"
examples = [{
'run': "ar 8 +DT",
'result': "Returns AR of AR8 with DT applied."
},
{
'run': "ar 6.4 +EZ",
'result': "Returns AR of AR6.4 with EZ applied."
}]
synonyms = []
async def call(self, package):
message, args = package["message_obj"], package['args']
try:
ar = float(args[1])
except ValueError:
msg = f"{args[1]} is not a valid ar"
Log.error(msg)
await help_me(message, self.command)
return
except IndexError:
Log.error("No ar provided")
await help_me(message, self.command)
return
mods = args[2].upper() if len(args) > 2 else ""
new_ar, ar_ms, mod_list = CalculateMods(mods).ar(ar)
output = ""
if len(mod_list) > 0:
if ar.is_integer():
ar = int(ar)
output += f"AR{ar}+{''.join(mod_list).upper()} -> "
new_ar = float(f"{new_ar:.2f}")
if new_ar.is_integer():
new_ar = int(new_ar)
output += f"AR{new_ar} ({ar_ms:.0f}ms)"
interact(message.channel.send, output)
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ResourcePrice:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'arch': 'str',
'price': 'float',
'size': 'str',
'type': 'str'
}
attribute_map = {
'arch': 'arch',
'price': 'price',
'size': 'size',
'type': 'type'
}
def __init__(self, arch=None, price=None, size=None, type=None):
"""ResourcePrice - a model defined in huaweicloud sdk"""
self._arch = None
self._price = None
self._size = None
self._type = None
self.discriminator = None
if arch is not None:
self.arch = arch
if price is not None:
self.price = price
if size is not None:
self.size = size
if type is not None:
self.type = type
@property
def arch(self):
"""Gets the arch of this ResourcePrice.
CPU architecture: x86 or arm
:return: The arch of this ResourcePrice.
:rtype: str
"""
return self._arch
@arch.setter
def arch(self, arch):
"""Sets the arch of this ResourcePrice.
CPU architecture: x86 or arm
:param arch: The arch of this ResourcePrice.
:type: str
"""
self._arch = arch
@property
def price(self):
"""Gets the price of this ResourcePrice.
Price
:return: The price of this ResourcePrice.
:rtype: float
"""
return self._price
@price.setter
def price(self, price):
"""Sets the price of this ResourcePrice.
Price
:param price: The price of this ResourcePrice.
:type: float
"""
self._price = price
@property
def size(self):
"""Gets the size of this ResourcePrice.
Specification. When type is 'storage', size can be 5GB, 10GB or 20GB. When type is 'cpuMemory', size can be 1U1G or 2U4G for arch 'x86', and 4U8G for arch 'arm'.
:return: The size of this ResourcePrice.
:rtype: str
"""
return self._size
@size.setter
def size(self, size):
"""Sets the size of this ResourcePrice.
Specification. When type is 'storage', size can be 5GB, 10GB or 20GB. When type is 'cpuMemory', size can be 1U1G or 2U4G for arch 'x86', and 4U8G for arch 'arm'.
:param size: The size of this ResourcePrice.
:type: str
"""
self._size = size
@property
def type(self):
"""Gets the type of this ResourcePrice.
Type. Currently one of: storage, cpuMemory.
:return: The type of this ResourcePrice.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ResourcePrice.
Type. Currently one of: storage, cpuMemory.
:param type: The type of this ResourcePrice.
:type: str
"""
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResourcePrice):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
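# Minimal usage sketch (not part of the generated SDK file):
#   item = ResourcePrice(arch="x86", price=0.5, size="2U4G", type="cpuMemory")
#   print(item.to_dict())  # {'arch': 'x86', 'price': 0.5, 'size': '2U4G', 'type': 'cpuMemory'}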
|
import os.path as osp
from cvpods.configs.ssd_config import SSDConfig
_config_dict = dict(
MODEL=dict(
WEIGHTS="cvpods/ImageNetPretrained/mmlab/vgg16.pth",
SSD=dict(
IMAGE_SIZE=512,
FEATURE_MAP_SIZE=[64, 32, 16, 8, 4, 2, 1],
DEFAULT_BOX=dict(
SCALE=dict(
CONV4_3_SCALE=0.04,
S_MIN=0.1,
S_MAX=0.9,
),
ASPECT_RATIOS=[[2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]],
CLIP=False,
),
),
),
DATASETS=dict(
TRAIN=("coco_2017_train",),
TEST=("coco_2017_val",),
),
SOLVER=dict(
LR_SCHEDULER=dict(
STEPS=(160000, 180000),
MAX_ITER=200000,
),
OPTIMIZER=dict(
BASE_LR=2e-3,
WEIGHT_DECAY=5e-4,
),
IMS_PER_BATCH=64,
IMS_PER_DEVICE=8,
),
INPUT=dict(
AUG=dict(
TRAIN_PIPELINES=[
("RandomBrightness", dict(
intensity_min=1 - 32.0 / 255,
intensity_max=1 + 32.0 / 255,
prob=0.5,
)),
("RandomContrast", dict(
intensity_min=0.5,
intensity_max=1.5,
prob=0.5,
)),
("RandomSaturation", dict(
intensity_min=0.5,
intensity_max=1.5,
prob=0.5,
)),
("RandomSwapChannels", dict(
prob=0.5,
)),
("Expand", dict(ratio_range=(1, 4),
mean=SSDConfig().MODEL.PIXEL_MEAN, prob=0.5)),
("MinIoURandomCrop", dict(
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3)),
("Resize", dict(shape=(512, 512))),
("RandomFlip", dict()),
],
TEST_PIPELINES=[
("Resize", dict(shape=(512, 512))),
],
),
FORMAT="RGB",
),
TEST=dict(
EVAL_PERIOD=10000,
),
OUTPUT_DIR=osp.join(
'/data/Outputs/model_logs/cvpods_playground',
osp.split(osp.realpath(__file__))[0].split("playground/")[-1]
),
)
class CustomSSDConfig(SSDConfig):
def __init__(self):
super(CustomSSDConfig, self).__init__()
self._register_configuration(_config_dict)
config = CustomSSDConfig()
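# Note (added for clarity, assuming cvpods follows the SSD paper's convention): S_MIN/S_MAX
# typically generate per-level default-box scales as s_k = s_min + (s_max - s_min) * (k - 1) / (m - 1)
# over the m feature maps, while CONV4_3_SCALE sets the scale of the first (conv4_3-sized) level directly.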
|
import numpy as np
from sklearn.neural_network import MLPRegressor
import sklearn
from itertools import repeat
import warnings
warnings.filterwarnings("ignore")
from multiprocessing import Pool
class DeepNeuroevolution():
def __init__(self, env, n_individuals, n_parents, n_features, n_actions, nn_architecture, reward_goal):
self._env = env
self._n_individuals = n_individuals
self._n_parents = n_parents
self._n_features = n_features
self._n_actions = n_actions
self._nn_architecture = nn_architecture
self._reward_goal = reward_goal
self._best_score = -10**10
self._n_generations = 0
def find_optimal_network(self):
self._create_first_population()
while not self._is_finished():
self._evaluate_population()
parents = self._create_parents()
self._create_new_population(parents)
self._print_score(parents)
def _is_finished(self):
return self._best_score >= self._reward_goal
def _evaluate_population(self):
for idx, mlp in enumerate(self._current_population):
print('Evaluation of Network', idx)
score = self._evaluate_network(mlp[0], 20)
self._current_population[idx][1] = score
def _evaluate_network(self, mlp, iterations):
score = 0
env = self._env
for _ in range(iterations):
state = env.reset()
old_state = None
done = False
while not done:
state = self._preprocess_state(state)
if (state == old_state).all():
action = np.random.randint(4)
else:
action = np.random.choice(self._n_actions, p=mlp.predict([state])[0])
old_state = state
state, reward, done = env.step(action)
score += reward
return score / iterations
def _preprocess_state(self, state):
new_state = state.flatten() / 2048  # true division, so integer board states are scaled correctly
return new_state
def _create_first_population(self):
self._current_population = []
for _ in range(self._n_individuals):
mlp = MLPRegressor(hidden_layer_sizes = self._nn_architecture, alpha=10**-10, max_iter=1)
mlp.fit([np.random.randn(self._n_features)], [np.random.randn(self._n_actions)])
mlp.out_activation_ = 'softmax'
self._current_population.append([mlp,0])
def _create_parents(self):
parents = sorted(self._current_population, key=lambda x: -x[1])[:self._n_parents]
for idx, mlp in enumerate(parents):
print('Evaluation of parent', idx)
score = self._evaluate_network(mlp[0], 50)
parents[idx][1] = score
parents.sort(key=lambda x:-x[1])
return parents
def _create_new_population(self, parents):
new_population = [parents[0]]
for _ in range(self._n_individuals-1):
idx = np.random.randint(len(parents))
weights, biases = self._compute_new_weights(parents[idx][0])
mlp = self._create_new_nn(weights, biases)
new_population.append([mlp, 0])
self._current_population = new_population
def _create_new_nn(self, weights, biases):
mlp = MLPRegressor(hidden_layer_sizes = self._nn_architecture, alpha=10**-10, max_iter=1)
mlp.fit([np.random.randn(self._n_features)], [np.random.randn(self._n_actions)])
mlp.coefs_ = weights
mlp.intercepts_ = biases
mlp.out_activation_ = 'softmax'
return mlp
def _compute_new_weights(self, parent):
weights = parent.coefs_
biases = parent.intercepts_
new_weights = []
new_biases = []
for weight in weights:
shape = weight.shape
new_weights.append(weight + 10*np.random.randn(shape[0], shape[1]))
for bias in biases:
new_biases.append(bias + 10*np.random.randn(bias.shape[0]))
return new_weights, new_biases
def _print_score(self, parents):
self._best_score = max(self._best_score, parents[0][1])
self._n_generations += 1
print('Results for generation', self._n_generations, '\n')
print('Overall best score is:', self._best_score)
print('Best scores of the current population:')
for i in parents:
print(i[1])
print('\n')
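# Minimal usage sketch (illustrative only; `Game2048Env` is a hypothetical environment whose
# step() returns (state, reward, done), matching the interface _evaluate_network expects):
#   env = Game2048Env()
#   dne = DeepNeuroevolution(env, n_individuals=50, n_parents=10, n_features=16, n_actions=4,
#                            nn_architecture=(16, 16), reward_goal=2048)
#   dne.find_optimal_network()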
|
from __future__ import annotations
from ctc import spec
def digest_eth_get_compilers(
response: spec.RpcSingularResponse,
) -> spec.RpcSingularResponse:
return response
def digest_eth_compile_lll(
response: spec.RpcSingularResponse,
) -> spec.RpcSingularResponse:
return response
def digest_eth_compile_solidity(
response: spec.RpcSingularResponse,
) -> spec.RpcSingularResponse:
return response
def digest_eth_compile_serpent(
response: spec.RpcSingularResponse,
) -> spec.RpcSingularResponse:
return response
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import os
import time
import tempfile
import re
import traceback
sys.path.insert(0, os.path.abspath(os.path.realpath(os.path.dirname(sys.argv[0]))))
from automation import Automation
from remoteautomation import RemoteAutomation
from runtests import Mochitest
from runtests import MochitestOptions
from runtests import MochitestServer
import devicemanager, devicemanagerADB, devicemanagerSUT
import manifestparser
class RemoteOptions(MochitestOptions):
def __init__(self, automation, scriptdir, **kwargs):
defaults = {}
MochitestOptions.__init__(self, automation, scriptdir)
self.add_option("--remote-app-path", action="store",
type = "string", dest = "remoteAppPath",
help = "Path to remote executable relative to device root using only forward slashes. Either this or app must be specified but not both")
defaults["remoteAppPath"] = None
self.add_option("--deviceIP", action="store",
type = "string", dest = "deviceIP",
help = "ip address of remote device to test")
defaults["deviceIP"] = None
self.add_option("--dm_trans", action="store",
type = "string", dest = "dm_trans",
help = "the transport to use to communicate with device: [adb|sut]; default=sut")
defaults["dm_trans"] = "sut"
self.add_option("--devicePort", action="store",
type = "string", dest = "devicePort",
help = "port of remote device to test")
defaults["devicePort"] = 20701
self.add_option("--remote-product-name", action="store",
type = "string", dest = "remoteProductName",
help = "The executable's name of remote product to test - either fennec or firefox, defaults to fennec")
defaults["remoteProductName"] = "fennec"
self.add_option("--remote-logfile", action="store",
type = "string", dest = "remoteLogFile",
help = "Name of log file on the device relative to the device root. PLEASE ONLY USE A FILENAME.")
defaults["remoteLogFile"] = None
self.add_option("--remote-webserver", action = "store",
type = "string", dest = "remoteWebServer",
help = "ip address where the remote web server is hosted at")
defaults["remoteWebServer"] = None
self.add_option("--http-port", action = "store",
type = "string", dest = "httpPort",
help = "http port of the remote web server")
defaults["httpPort"] = automation.DEFAULT_HTTP_PORT
self.add_option("--ssl-port", action = "store",
type = "string", dest = "sslPort",
help = "ssl port of the remote web server")
defaults["sslPort"] = automation.DEFAULT_SSL_PORT
self.add_option("--pidfile", action = "store",
type = "string", dest = "pidFile",
help = "name of the pidfile to generate")
defaults["pidFile"] = ""
self.add_option("--robocop", action = "store",
type = "string", dest = "robocop",
help = "name of the .ini file containing the list of tests to run")
defaults["robocop"] = ""
self.add_option("--robocop-path", action = "store",
type = "string", dest = "robocopPath",
help = "Path to the folder where robocop.apk is located at. Primarily used for ADB test running")
defaults["robocopPath"] = ""
self.add_option("--robocop-ids", action = "store",
type = "string", dest = "robocopIds",
help = "name of the file containing the view ID map (fennec_ids.txt)")
defaults["robocopIds"] = ""
defaults["remoteTestRoot"] = None
defaults["logFile"] = "mochitest.log"
defaults["autorun"] = True
defaults["closeWhenDone"] = True
defaults["testPath"] = ""
defaults["app"] = None
self.set_defaults(**defaults)
def verifyRemoteOptions(self, options, automation):
options.remoteTestRoot = automation._devicemanager.getDeviceRoot()
productRoot = options.remoteTestRoot + "/" + automation._product
if (options.utilityPath == self._automation.DIST_BIN):
options.utilityPath = productRoot + "/bin"
if options.remoteWebServer == None:
if os.name != "nt":
options.remoteWebServer = automation.getLanIp()
else:
print "ERROR: you must specify a --remote-webserver=<ip address>\n"
return None
options.webServer = options.remoteWebServer
if (options.deviceIP == None):
print "ERROR: you must provide a device IP"
return None
if (options.remoteLogFile == None):
options.remoteLogFile = options.remoteTestRoot + '/logs/mochitest.log'
if (options.remoteLogFile.count('/') < 1):
options.remoteLogFile = options.remoteTestRoot + '/' + options.remoteLogFile
# remoteAppPath or app must be specified to find the product to launch
if (options.remoteAppPath and options.app):
print "ERROR: You cannot specify both the remoteAppPath and the app setting"
return None
elif (options.remoteAppPath):
options.app = options.remoteTestRoot + "/" + options.remoteAppPath
elif (options.app == None):
# Neither remoteAppPath nor app are set -- error
print "ERROR: You must specify either appPath or app"
return None
# Only reset the xrePath if it wasn't provided
if (options.xrePath == None):
if (automation._product == "fennec"):
options.xrePath = productRoot + "/xulrunner"
else:
options.xrePath = options.utilityPath
if (options.pidFile != ""):
f = open(options.pidFile, 'w')
f.write("%s" % os.getpid())
f.close()
# Robocop specific options
if options.robocop != "":
if not os.path.exists(options.robocop):
print "ERROR: Unable to find specified manifest '%s'" % options.robocop
return None
options.robocop = os.path.abspath(options.robocop)
if options.robocopPath != "":
if not os.path.exists(os.path.join(options.robocopPath, 'robocop.apk')):
print "ERROR: Unable to find robocop.apk in path '%s'" % options.robocopPath
return None
options.robocopPath = os.path.abspath(options.robocopPath)
if options.robocopIds != "":
if not os.path.exists(options.robocopIds):
print "ERROR: Unable to find specified IDs file '%s'" % options.robocopIds
return None
options.robocopIds = os.path.abspath(options.robocopIds)
return options
def verifyOptions(self, options, mochitest):
# since we are reusing verifyOptions, it will exit if App is not found
temp = options.app
options.app = sys.argv[0]
tempPort = options.httpPort
tempSSL = options.sslPort
tempIP = options.webServer
options = MochitestOptions.verifyOptions(self, options, mochitest)
options.webServer = tempIP
options.app = temp
options.sslPort = tempSSL
options.httpPort = tempPort
return options
class MochiRemote(Mochitest):
_automation = None
_dm = None
localProfile = None
logLines = []
def __init__(self, automation, devmgr, options):
self._automation = automation
Mochitest.__init__(self, self._automation)
self._dm = devmgr
self.runSSLTunnel = False
self.remoteProfile = options.remoteTestRoot + "/profile"
self._automation.setRemoteProfile(self.remoteProfile)
self.remoteLog = options.remoteLogFile
self.localLog = options.logFile
def cleanup(self, manifest, options):
if self._dm.fileExists(self.remoteLog):
self._dm.getFile(self.remoteLog, self.localLog)
self._dm.removeFile(self.remoteLog)
else:
print "WARNING: Unable to retrieve log file (%s) from remote " \
"device" % self.remoteLog
self._dm.removeDir(self.remoteProfile)
if (options.pidFile != ""):
try:
os.remove(options.pidFile)
os.remove(options.pidFile + ".xpcshell.pid")
except:
print "Warning: cleaning up pidfile '%s' was unsuccessful from the test harness" % options.pidFile
def findPath(self, paths, filename = None):
for path in paths:
p = path
if filename:
p = os.path.join(p, filename)
if os.path.exists(self.getFullPath(p)):
return path
return None
def startWebServer(self, options):
""" Create the webserver on the host and start it up """
remoteXrePath = options.xrePath
remoteProfilePath = options.profilePath
remoteUtilityPath = options.utilityPath
localAutomation = Automation()
localAutomation.IS_WIN32 = False
localAutomation.IS_LINUX = False
localAutomation.IS_MAC = False
localAutomation.UNIXISH = False
hostos = sys.platform
if (hostos == 'mac' or hostos == 'darwin'):
localAutomation.IS_MAC = True
elif (hostos == 'linux' or hostos == 'linux2'):
localAutomation.IS_LINUX = True
localAutomation.UNIXISH = True
elif (hostos == 'win32' or hostos == 'win64'):
localAutomation.BIN_SUFFIX = ".exe"
localAutomation.IS_WIN32 = True
paths = [options.xrePath, localAutomation.DIST_BIN, self._automation._product, os.path.join('..', self._automation._product)]
options.xrePath = self.findPath(paths)
if options.xrePath == None:
print "ERROR: unable to find xulrunner path for %s, please specify with --xre-path" % (os.name)
sys.exit(1)
paths.append("bin")
paths.append(os.path.join("..", "bin"))
xpcshell = "xpcshell"
if (os.name == "nt"):
xpcshell += ".exe"
if (options.utilityPath):
paths.insert(0, options.utilityPath)
options.utilityPath = self.findPath(paths, xpcshell)
if options.utilityPath == None:
print "ERROR: unable to find utility path for %s, please specify with --utility-path" % (os.name)
sys.exit(1)
options.profilePath = tempfile.mkdtemp()
self.server = MochitestServer(localAutomation, options)
self.server.start()
if (options.pidFile != ""):
f = open(options.pidFile + ".xpcshell.pid", 'w')
f.write("%s" % self.server._process.pid)
f.close()
self.server.ensureReady(self.SERVER_STARTUP_TIMEOUT)
options.xrePath = remoteXrePath
options.utilityPath = remoteUtilityPath
options.profilePath = remoteProfilePath
def stopWebServer(self, options):
self.server.stop()
def buildProfile(self, options):
if self.localProfile:
options.profilePath = self.localProfile
manifest = Mochitest.buildProfile(self, options)
self.localProfile = options.profilePath
self._dm.removeDir(self.remoteProfile)
try:
self._dm.pushDir(options.profilePath, self.remoteProfile)
except devicemanager.DMError:
print "Automation Error: Unable to copy profile to device."
raise
options.profilePath = self.remoteProfile
return manifest
def buildURLOptions(self, options, env):
self.localLog = options.logFile
options.logFile = self.remoteLog
options.profilePath = self.localProfile
retVal = Mochitest.buildURLOptions(self, options, env)
#we really need testConfig.js (for browser chrome)
try:
self._dm.pushDir(options.profilePath, self.remoteProfile)
except devicemanager.DMError:
print "Automation Error: Unable to copy profile to device."
raise
options.profilePath = self.remoteProfile
options.logFile = self.localLog
return retVal
def installChromeFile(self, filename, options):
parts = options.app.split('/')
if (parts[0] == options.app):
return "NO_CHROME_ON_DROID"
path = '/'.join(parts[:-1])
manifest = path + "/chrome/" + os.path.basename(filename)
try:
self._dm.pushFile(filename, manifest)
except devicemanager.DMError:
print "Automation Error: Unable to install Chrome files on device."
raise
return manifest
def getLogFilePath(self, logFile):
return logFile
# In the future we could use LogParser: http://hg.mozilla.org/automation/logparser/
def addLogData(self):
with open(self.localLog) as currentLog:
data = currentLog.readlines()
restart = re.compile('0 INFO SimpleTest START.*')
reend = re.compile('([0-9]+) INFO TEST-START . Shutdown.*')
start_found = False
end_found = False
for line in data:
if reend.match(line):
end_found = True
start_found = False
return
if start_found and not end_found:
# Append the line without the number to increment
self.logLines.append(' '.join(line.split(' ')[1:]))
if restart.match(line):
start_found = True
def printLog(self):
passed = 0
failed = 0
todo = 0
incr = 1
logFile = []
logFile.append("0 INFO SimpleTest START")
for line in self.logLines:
if line.startswith("INFO TEST-PASS"):
passed += 1
elif line.startswith("INFO TEST-UNEXPECTED"):
failed += 1
elif line.startswith("INFO TEST-KNOWN"):
todo += 1
incr += 1
logFile.append("%s INFO TEST-START | Shutdown" % incr)
incr += 1
logFile.append("%s INFO Passed: %s" % (incr, passed))
incr += 1
logFile.append("%s INFO Failed: %s" % (incr, failed))
incr += 1
logFile.append("%s INFO Todo: %s" % (incr, todo))
incr += 1
logFile.append("%s INFO SimpleTest FINISHED" % incr)
# TODO: Consider not printing to stdout because we might be duplicating output
print '\n'.join(logFile)
with open(self.localLog, 'w') as localLog:
localLog.write('\n'.join(logFile))
if failed > 0:
return 1
return 0
def main():
scriptdir = os.path.abspath(os.path.realpath(os.path.dirname(__file__)))
auto = RemoteAutomation(None, "fennec")
parser = RemoteOptions(auto, scriptdir)
options, args = parser.parse_args()
if (options.dm_trans == "adb"):
if (options.deviceIP):
dm = devicemanagerADB.DeviceManagerADB(options.deviceIP, options.devicePort)
else:
dm = devicemanagerADB.DeviceManagerADB()
else:
dm = devicemanagerSUT.DeviceManagerSUT(options.deviceIP, options.devicePort)
auto.setDeviceManager(dm)
options = parser.verifyRemoteOptions(options, auto)
if (options == None):
print "ERROR: Invalid options specified, use --help for a list of valid options"
sys.exit(1)
productPieces = options.remoteProductName.split('.')
if (productPieces != None):
auto.setProduct(productPieces[0])
else:
auto.setProduct(options.remoteProductName)
mochitest = MochiRemote(auto, dm, options)
options = parser.verifyOptions(options, mochitest)
if (options == None):
sys.exit(1)
logParent = os.path.dirname(options.remoteLogFile)
dm.mkDir(logParent);
auto.setRemoteLog(options.remoteLogFile)
auto.setServerInfo(options.webServer, options.httpPort, options.sslPort)
print dm.getInfo()
procName = options.app.split('/')[-1]
if (dm.processExist(procName)):
dm.killProcess(procName)
if options.robocop != "":
mp = manifestparser.TestManifest(strict=False)
# TODO: pull this in dynamically
mp.read(options.robocop)
robocop_tests = mp.active_tests(exists=False)
fHandle = tempfile.NamedTemporaryFile(suffix='.config',
prefix='robotium-',
dir=os.getcwd(),
delete=False)
fHandle.write("profile=%s\n" % (mochitest.remoteProfile))
fHandle.write("logfile=%s\n" % (options.remoteLogFile))
fHandle.write("host=http://mochi.test:8888/tests\n")
fHandle.write("rawhost=http://%s:%s/tests\n" % (options.remoteWebServer, options.httpPort))
fHandle.close()
deviceRoot = dm.getDeviceRoot()
dm.removeFile(os.path.join(deviceRoot, "fennec_ids.txt"))
dm.removeFile(os.path.join(deviceRoot, "robotium.config"))
dm.pushFile(fHandle.name, os.path.join(deviceRoot, "robotium.config"))
os.unlink(fHandle.name)
fennec_ids = os.path.abspath("fennec_ids.txt")
if not os.path.exists(fennec_ids) and options.robocopIds:
fennec_ids = options.robocopIds
dm.pushFile(fennec_ids, os.path.join(deviceRoot, "fennec_ids.txt"))
options.extraPrefs.append('robocop.logfile="%s/robocop.log"' % deviceRoot)
options.extraPrefs.append('browser.search.suggest.enabled=true')
options.extraPrefs.append('browser.search.suggest.prompted=true')
if (options.dm_trans == 'adb' and options.robocopPath):
dm._checkCmd(["install", "-r", os.path.join(options.robocopPath, "robocop.apk")])
appname = options.app
retVal = None
logcat = []
for test in robocop_tests:
if options.testPath and options.testPath != test['name']:
continue
options.app = "am"
options.browserArgs = ["instrument", "-w", "-e", "deviceroot", deviceRoot, "-e", "class"]
options.browserArgs.append("%s.tests.%s" % (appname, test['name']))
options.browserArgs.append("org.mozilla.roboexample.test/%s.FennecInstrumentationTestRunner" % appname)
try:
dm.recordLogcat()
retVal = mochitest.runTests(options)
logcat = dm.getLogcat()
mochitest.addLogData()
except:
print "Automation Error: Exception caught while running tests"
traceback.print_exc()
mochitest.stopWebServer(options)
mochitest.stopWebSocketServer(options)
try:
mochitest.cleanup(None, options)
except:
pass
sys.exit(1)
if retVal is None:
print "No tests run. Did you pass an invalid TEST_PATH?"
retVal = 1
retVal = mochitest.printLog()
else:
try:
dm.recordLogcat()
retVal = mochitest.runTests(options)
logcat = dm.getLogcat()
except:
print "Automation Error: Exception caught while running tests"
traceback.print_exc()
mochitest.stopWebServer(options)
mochitest.stopWebSocketServer(options)
try:
mochitest.cleanup(None, options)
except:
pass
sys.exit(1)
print ''.join(logcat[-500:-1])
print dm.getInfo()
sys.exit(retVal)
if __name__ == "__main__":
main()
|
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib_venn import venn2, venn2_circles, venn3, venn3_circles
from scipy.stats import hypergeom
from os import path, makedirs
from statsmodels.sandbox.stats.multicomp import multipletests
import numpy as np
from biom import load_table
import seaborn as sns
import json
from collections import defaultdict, OrderedDict
from datetime import datetime
from KEGG_parser.parsers import parse_ko, parse_rn, parse_co, parse_pathway
from KEGG_parser.downloader import get_kegg_record_dict
sns.set()
# TODO: take multiple files
class Logger(OrderedDict):
""""""
def __init__(self, output):
super(Logger, self).__init__()
self.output_file = output
self['start time'] = datetime.now()
def output_log(self):
with open(self.output_file, 'w') as f:
self['finish time'] = datetime.now()
self['elapsed time'] = self['finish time'] - self['start time']
for key, value in self.items():
f.write(key + ': ' + str(value) + '\n')
def p_adjust(pvalues, method='fdr_bh'):
res = multipletests(pvalues, method=method)
return np.array(res[1], dtype=float)
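# Example (illustrative only): Benjamini-Hochberg adjustment of three raw p-values.
#   p_adjust([0.01, 0.04, 0.30])  # -> array([0.03, 0.06, 0.30])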
def read_in_ids(file_loc, keep_separated=False, samples_are_columns=False, name=None):
"""
Read in kos from whitespace separated list (.txt), tsv with KOs as row headers (.tsv/.csv) or biom table (.biom).
"""
if file_loc.endswith('.txt'):
if name is None:
raise ValueError('Name must be given if giving .txt list')
return {name: set([i.strip() for i in open(file_loc).read().split()])}
elif (file_loc.endswith('.tsv') or file_loc.endswith('.csv')) and keep_separated:
genome_table = pd.read_table(file_loc, sep=None, index_col=0, engine='python')
samples_dict = dict()
if samples_are_columns:
genome_table = genome_table.transpose()
for sample in genome_table.index:
samples_dict[sample] = set(genome_table.columns[genome_table.loc[sample].astype(bool)])
return samples_dict
elif file_loc.endswith('.tsv') or file_loc.endswith('.csv'):
if name is None:
raise ValueError('Name must be given if giving .tsv or .csv and not separating')
return {name: set(pd.read_table(file_loc, sep=None, index_col=0, engine='python').columns)}
elif file_loc.endswith('.biom') and keep_separated:
id_table = load_table(file_loc)
samples_dict = dict()
for data, sample, _ in id_table.iter(axis='sample'):
samples_dict[sample] = set(id_table.ids(axis='observation')[data.astype(bool)])
return samples_dict
elif file_loc.endswith('.biom'):
if name is None:
raise ValueError('Name must be given if giving .biom and not separating')
id_table = load_table(file_loc)
# first remove KO's which aren't present in any samples
ids_to_keep = id_table.ids(axis='observation')[id_table.sum(axis='observation') > 0]
id_table.filter(ids_to_keep, axis='observation', inplace=True)
return {name: set(id_table.ids(axis='observation'))}
else:
raise ValueError('Input file %s does not have a parsable file ending.' % file_loc)
def get_rns_from_kos(dict_of_kos: dict, ko_dict: dict):
sample_rns = dict()
for sample, list_of_kos in dict_of_kos.items():
reaction_set = list()
for ko in list_of_kos:
try:
ko_record = ko_dict[ko]
if 'RN' in ko_record['DBLINKS']:
reaction_set += ko_record['DBLINKS']['RN']
except KeyError:
pass
sample_rns[sample] = reaction_set
return sample_rns
def get_products_from_rns(dict_of_rns: dict, rn_dict: dict):
return {sample: set([co for rn in list_of_rns for co in rn_dict[rn]['EQUATION'][1]])
for sample, list_of_rns in dict_of_rns.items()}
def reverse_dict_of_lists(dict_of_lists):
reversed_dict = defaultdict(list)
for key, list_ in dict_of_lists.items():
for item in list_:
reversed_dict[item].append(key)
return reversed_dict
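# Example (illustrative only):
#   reverse_dict_of_lists({'sampleA': ['C00001', 'C00002'], 'sampleB': ['C00002']})
#   # -> {'C00001': ['sampleA'], 'C00002': ['sampleA', 'sampleB']}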
def make_compound_origin_table(sample_cos_produced: dict, cos_measured=None):
columns = list(sample_cos_produced.keys())
rows = list()
cos_to_samples_dict = reverse_dict_of_lists(sample_cos_produced)
for co, samples in cos_to_samples_dict.items():
rows.append([sample in samples for sample in columns])
table = pd.DataFrame(rows, index=cos_to_samples_dict.keys(), columns=columns)
if cos_measured is not None:
table['detected'] = [co in cos_measured for co in table.index]
return table
def merge_dicts_of_lists(*dicts):
merged_dicts = defaultdict(list)
for dict_ in dicts:
for key, list_ in dict_.items():
merged_dicts[key] += list_
return merged_dicts
def make_kegg_mapper_input(sample_ids, detected_ids=None, origin_colors=('blue', 'green', 'yellow'),
detected_color='orange'):
samples = list(sample_ids.keys())
microbe_ids = sample_ids[samples[0]]
if len(samples) == 2:
host_ids = sample_ids[samples[1]]
else:
host_ids = ()
if detected_ids is None:
detected_ids = ()
ids = list()
colors = list()
for id_ in set(microbe_ids) | set(host_ids) | set(detected_ids):
# save id
ids.append(id_)
# check where id is present
microbe_present = id_ in microbe_ids
host_present = id_ in host_ids
detected_present = id_ in detected_ids
origin_color = None
detect_color = None
if microbe_present and host_present:
origin_color = origin_colors[1]
elif microbe_present:
origin_color = origin_colors[0]
elif host_present:
origin_color = origin_colors[2]
else:
pass
if detected_present:
detect_color = detected_color
color = ''
if origin_color is not None:
color += origin_color
if detect_color is not None:
color += ',%s' % detect_color
colors.append(color)
df = pd.Series(colors, index=ids)
return df
def make_venn(sample_cos_produced, measured_cos=None, output_loc=None, name1='gene_set_1', name2='gene_set_2'):
samples = list(sample_cos_produced.keys())
bac_cos = sample_cos_produced[samples[0]]
if len(samples) == 2:
host_cos = sample_cos_produced[samples[1]]
else:
host_cos = None
if host_cos is None and measured_cos is None:
raise ValueError("Must give host_cos or measured_cos to make venn diagram")
if host_cos is not None and measured_cos is None:
_ = venn2((set(bac_cos), set(host_cos)),
("Compounds predicted\nproduced by %s" % name1.replace('_', ' '),
"Compounds predicted\nproduced by %s" % name2.replace('_', ' ')),
set_colors=('white',)*2)
_ = venn2_circles((set(bac_cos), set(host_cos)), linestyle='solid')
elif host_cos is None and measured_cos is not None:
_ = venn2((set(bac_cos), set(measured_cos)),
("Compounds predicted\nproduced by %s" % name1.replace('_', ' '), "Compounds measured"),
set_colors=('white',)*2)
_ = venn2_circles((set(bac_cos), set(measured_cos)), linestyle='solid')
else:
_ = venn3((set(measured_cos), set(bac_cos), set(host_cos)),
("Compounds measured", "Compounds predicted\nproduced by %s" % name1.replace('_', ' '),
"Compounds predicted\nproduced by %s" % name2.replace('_', ' ')),
set_colors=('white',)*3)
_ = venn3_circles((set(measured_cos), set(bac_cos), set(host_cos)), linestyle='solid')
if output_loc is not None:
plt.savefig(output_loc, bbox_inches='tight', dpi=300)
else:
plt.show()
def get_pathways_from_cos(co_dict):
pathway_list = list()
for co_record in co_dict.values():
if 'PATHWAY' in co_record:
pathway_list += [pathway[0] for pathway in co_record['PATHWAY']]
return set(pathway_list)
def get_unique_from_dict_of_lists(dict_of_lists):
unique_dict_of_lists = dict()
for key, list_ in dict_of_lists.items():
all_other_values = set([value for other_key, other_list in dict_of_lists.items() for value in other_list
if other_key != key])
unique_dict_of_lists[key] = set(list_) - all_other_values
return unique_dict_of_lists
def get_pathway_to_co_dict(pathway_dict, no_drug=True, no_glycan=True):
pathway_to_co_dict = {pathway_record['NAME']: [compound[0] for compound in pathway_record['COMPOUND']]
for pathway_record in pathway_dict.values() if 'COMPOUND' in pathway_record}
if no_drug:
pathway_to_co_dict = {pathway: [co for co in cos if not co.startswith('D')]
for pathway, cos in pathway_to_co_dict.items()}
if no_glycan:
pathway_to_co_dict = {pathway: [co for co in cos if not co.startswith('G')]
for pathway, cos in pathway_to_co_dict.items()}
return pathway_to_co_dict
def calculate_enrichment(cos, co_pathway_dict, min_pathway_size=10):
all_cos = set([co for co_list in co_pathway_dict.values() for co in co_list])
pathway_names = list()
pathway_data = list()
for pathway, pathway_cos in co_pathway_dict.items():
pathway_present = set(pathway_cos)
if len(pathway_present) > min_pathway_size:
overlap = set(cos) & pathway_present
prob = hypergeom.sf(len(overlap), len(all_cos), len(pathway_present), len(set(cos)))
pathway_names.append(pathway)
pathway_data.append([len(pathway_present), len(overlap), prob])
enrichment_table = pd.DataFrame(pathway_data, index=pathway_names,
columns=["pathway size", "overlap", "probability"])
enrichment_table['adjusted probability'] = p_adjust(enrichment_table.probability)
if np.any((enrichment_table['adjusted probability'] < .05) & (enrichment_table['overlap'] == 0)):
return None
else:
return enrichment_table.sort_values('adjusted probability')
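# Worked example (illustrative only, not part of the original module): the call
# hypergeom.sf(k, M, n, N) above returns the upper-tail probability P(X > k)
# of seeing more than k pathway compounds by chance, where M is the number of
# compounds in the whole universe, n the number of compounds in the pathway,
# and N the number of compounds produced by the sample. For instance, with a
# universe of 100 compounds, a pathway of 20, a sample of 30 produced
# compounds and an overlap of 12:
#
#     from scipy.stats import hypergeom
#     p = hypergeom.sf(12, 100, 20, 30)  # small upper-tail probability
#
# These per-pathway p-values are then multiple-testing corrected by p_adjust.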
def make_enrichment_clustermap(pathway_enrichment_dfs: dict, key, output_loc, min_p=.1, log=False):
enrichment_p_df = pd.DataFrame.from_dict({sample: pathway_enrichment_df[key] for sample, pathway_enrichment_df in
pathway_enrichment_dfs.items()})
enrichment_p_df = enrichment_p_df.loc[enrichment_p_df.index[(enrichment_p_df<min_p).sum(axis=1) > 0]]
enrichment_p_df = enrichment_p_df[enrichment_p_df.columns[(enrichment_p_df<min_p).sum(axis=0) > 0]]
if log:
enrichment_p_df = np.log(enrichment_p_df)
g = sns.clustermap(enrichment_p_df, col_cluster=False, figsize=(2, 12), cmap="Blues_r", method="average")
_ = plt.setp(g.ax_heatmap.get_xticklabels(), rotation=340, fontsize=12, ha="left")
_ = plt.setp(g.ax_heatmap.get_yticklabels(), rotation=0, fontsize=12)
plt.savefig(output_loc, dpi=500, bbox_inches='tight')
def main(kos_loc, output_dir, other_kos_loc=None, compounds_loc=None, name1='gene_set_1', name2='gene_set_2',
keep_separated=False, samples_are_columns=False, detected_only=False, rxn_compounds_only=False,
unique_only=True, ko_file_loc=None, rn_file_loc=None, co_file_loc=None, pathway_file_loc=None,
write_json=False):
# create output dir to throw error quick
makedirs(output_dir)
logger = Logger(path.join(output_dir, "AMON_log.txt"))
# read in all kos and get records
sample_kos = read_in_ids(kos_loc, keep_separated=keep_separated,
samples_are_columns=samples_are_columns, name=name1)
logger['kos_loc'] = path.abspath(kos_loc)
if other_kos_loc is not None:
sample_kos.update(read_in_ids(other_kos_loc, keep_separated=keep_separated,
samples_are_columns=samples_are_columns, name=name2))
logger['other_kos_loc'] = path.abspath(other_kos_loc)
all_kos = set([value for values in sample_kos.values() for value in values])
logger['Number of samples'] = len(sample_kos)
logger['Total number of KOs'] = len(all_kos)
ko_dict = get_kegg_record_dict(set(all_kos), parse_ko, ko_file_loc)
if write_json:
open(path.join(output_dir, 'ko_dict.json'), 'w').write(json.dumps(ko_dict))
logger['KO json location'] = path.abspath(path.join(output_dir, 'ko_dict.json'))
# get all reactions from kos
sample_rns = get_rns_from_kos(sample_kos, ko_dict)
all_rns = set([value for values in sample_rns.values() for value in values])
logger['Total number of reactions'] = len(all_rns)
# get reactions from kegg
rn_dict = get_kegg_record_dict(set(all_rns), parse_rn, rn_file_loc)
if write_json:
open(path.join(output_dir, 'rn_dict.json'), 'w').write(json.dumps(rn_dict))
logger['RN json location'] = path.abspath(path.join(output_dir, 'rn_dict.json'))
# Get reactions from KEGG and pull cos produced
sample_cos_produced = get_products_from_rns(sample_rns, rn_dict)
# read in compounds that were measured if available
if compounds_loc is not None:
cos_measured = list(read_in_ids(compounds_loc, name='Compounds', keep_separated=False).values())[0]
logger['compounds_loc'] = path.abspath(compounds_loc)
else:
cos_measured = None
# make compound origin table
origin_table = make_compound_origin_table(sample_cos_produced, cos_measured)
# get rid of any all false columns
origin_table = origin_table[origin_table.columns[origin_table.sum().astype(bool)]]
origin_table.to_csv(path.join(output_dir, 'origin_table.tsv'), sep='\t')
logger['Origin table location'] = path.abspath(path.join(output_dir, 'origin_table.tsv'))
# make kegg mapper input if 2 or fewer samples
if len(sample_cos_produced) <= 2:
kegg_mapper_input = make_kegg_mapper_input(merge_dicts_of_lists(sample_kos, sample_cos_produced), cos_measured)
kegg_mapper_input.to_csv(path.join(output_dir, 'kegg_mapper.tsv'), sep='\t')
logger['KEGG mapper location'] = path.abspath(path.join(output_dir, 'kegg_mapper.tsv'))
# Get full set of compounds
all_cos_produced = set([value for values in sample_cos_produced.values() for value in values])
logger['Number of cos produced across samples'] = len(all_cos_produced)
if detected_only:
all_cos_produced = set(all_cos_produced) | set(cos_measured)
logger['Number of cos produced and detected'] = len(all_cos_produced)
# Get compound data from kegg
co_dict = get_kegg_record_dict(all_cos_produced, parse_co, co_file_loc)
if write_json:
open(path.join(output_dir, 'co_dict.json'), 'w').write(json.dumps(co_dict))
# remove compounds without reactions if required
if rxn_compounds_only:
cos_with_rxn = list()
for compound, record in co_dict.items():
if 'REACTION' in record:
cos_with_rxn.append(compound)
cos_measured = set(cos_measured) & set(cos_with_rxn)
# Make venn diagram
if (compounds_loc is not None or len(sample_cos_produced) > 1) and len(sample_cos_produced) <= 2:
make_venn(sample_cos_produced, cos_measured, path.join(output_dir, 'venn.png'))
# Filter compounds down to only cos measured for cos produced and other cos produced
if detected_only:
sample_cos_produced = {sample: set(cos_produced) & set(cos_measured) for sample, cos_produced
in sample_cos_produced.items()}
# find compounds unique to microbes and to host if host included
if unique_only:
sample_cos_produced = get_unique_from_dict_of_lists(sample_cos_produced)
# Get pathway info from pathways in compounds
all_pathways = [pathway.replace('map', 'ko') for pathway in get_pathways_from_cos(co_dict)]
pathway_dict = get_kegg_record_dict(all_pathways, parse_pathway, pathway_file_loc)
pathway_to_compound_dict = get_pathway_to_co_dict(pathway_dict, no_glycan=False)
# calculate enrichment
pathway_enrichment_dfs = dict()
for sample, cos_produced in sample_cos_produced.items():
pathway_enrichment_df = calculate_enrichment(cos_produced, pathway_to_compound_dict)
if pathway_enrichment_df is not None:
pathway_enrichment_df.to_csv(path.join(output_dir, '%s_compound_pathway_enrichment.tsv' % sample), sep='\t')
            logger['%s pathway enrichment' % sample] = path.abspath(
                path.join(output_dir, '%s_compound_pathway_enrichment.tsv' % sample))
pathway_enrichment_dfs[sample] = pathway_enrichment_df
if len(pathway_enrichment_dfs) > 0:
make_enrichment_clustermap(pathway_enrichment_dfs, 'adjusted probability',
path.join(output_dir, 'enrichment_heatmap.png'))
logger['Enrichment clustermap location'] = path.abspath(path.join(output_dir, 'enrichment_heatmap.png'))
logger.output_log()
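# A hypothetical direct invocation of main() (file names are placeholders; in
# practice this function is normally driven by a CLI wrapper):
#
#     main('microbe_kos.txt', 'amon_output',
#          other_kos_loc='host_kos.txt',
#          compounds_loc='detected_compounds.txt',
#          name1='microbiome', name2='host')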
|
# Preppin' Data 2021 Week 25
import pandas as pd
import numpy as np
# Load data
gen_1 = pd.read_excel('unprepped_data\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx', sheet_name='Gen 1')
evolution_group = pd.read_excel('unprepped_data\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx', sheet_name='Evolution Group')
evolutions = pd.read_excel('unprepped_data\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx', sheet_name='Evolutions')
mega_evolutions = pd.read_excel('unprepped_data\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx', sheet_name='Mega Evolutions')
alolan = pd.read_excel('unprepped_data\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx', sheet_name='Alolan')
galarian = pd.read_excel('unprepped_data\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx', sheet_name='Galarian')
gigantamax = pd.read_excel('unprepped_data\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx', sheet_name='Gigantamax')
unattainable = pd.read_excel('unprepped_data\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx', sheet_name='Unattainable in Sword & Shield')
anime = pd.read_excel('unprepped_data\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx', sheet_name='Anime Appearances')
# Clean up the list of Gen 1 Pokémon so we have 1 row per Pokémon
gen_1 = gen_1.loc[gen_1.Name.notnull()]
gen_1['#'] = np.int64(gen_1['#'])
# Clean up the Evolution Group input so that we can join it to the Gen 1 list
evolution_group['#'] = np.int64(evolution_group['#'])
evol_lookup = evolution_group[['Evolution Group','#']]
evolution_group_df = evolution_group
del evolution_group_df['Evolution Group']
evolution_group_df = evolution_group_df.drop_duplicates()
gen_1_df = pd.merge(gen_1,evolution_group_df, on = '#', how = 'inner')
# Filter out Starter and Legendary Pokémon
gen_1_df = gen_1_df.loc[gen_1_df['Starter?'] == 0]
gen_1_df = gen_1_df.loc[gen_1_df['Legendary?'] == 0]
# Using the Evolutions input, exclude any Pokémon that evolves from a Pokémon that is not part of Gen 1 or can evolve into a Pokémon outside of Gen 1
evolutions_df = evolutions[evolutions['Evolving to'].isin(gen_1_df['Name'])]
evolutions_df = evolutions_df[evolutions_df['Evolving from'].isin(gen_1_df['Name'])]
# create list of evolving to and from
keep = list(evolutions_df['Evolving from'])
keep_2 = list(evolutions_df['Evolving to'])
keep.extend(keep_2)
keep = list(set(keep))
gen_1_df = gen_1_df[gen_1_df['Name'].isin(keep)]
# Exclude any Pokémon with a mega evolution, Alolan, Galarian or Gigantamax form
exclude_df = pd.concat([mega_evolutions,alolan,galarian,gigantamax])
exclude_df['Name'] = exclude_df['Name'].str.replace('^(Mega|Alolan|Galarian|Gigantamax) ','',regex = True)
exclude_df['Name'] = exclude_df['Name'].str.replace(' (X|Y)$','',regex = True)
# find any pokemon that evolves into a mega / gigantamax
id_lookup = gen_1_df[['#','Name']]
exclude_df = pd.merge(exclude_df,id_lookup, on = 'Name', how = 'inner')
exclude_df = pd.merge(exclude_df,evol_lookup, on = '#', how = 'inner')
del exclude_df['#']
exclude_df = pd.merge(exclude_df,evol_lookup, on = 'Evolution Group', how = 'inner')
# exclude pokemon that can mega evolve etc.
gen_1_df = gen_1_df[~gen_1_df['#'].isin(exclude_df['#'])]
# It's not possible to catch certain Pokémon in the most recent games. These are the only ones we will consider from this point on
gen_1_df = gen_1_df[gen_1_df['Name'].isin(list(unattainable['Name']))]
# We're left with 10 evolution groups. Rank them in ascending order of how many times they've appeared in the anime to see who the worst Pokémon is!
# convert anime to evolution groups
anime_df = pd.merge(anime,id_lookup, left_on = 'Pokemon', right_on = 'Name', how = 'inner')
anime_df = pd.merge(anime_df,evol_lookup, on = '#', how = 'inner')
# count appearances
appearences = anime_df[anime_df['Evolution Group'].isin(list(gen_1_df['Name']))]
appearences = appearences[['Evolution Group','Episode']].drop_duplicates()
appearences = appearences.groupby(['Evolution Group'],as_index=False).count()
appearences.columns = ['Evolution Group','Appearances']
# create rank
appearences['Worst Pokémon'] = appearences['Appearances'].rank(ascending=True)
appearences['Worst Pokémon'] = appearences['Worst Pokémon'].astype(int)
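# Note (illustrative, not part of the original prep): rank() defaults to
# method='average', so tied appearance counts share a fractional rank before
# the astype(int) truncation. A dense integer ranking could be used instead:
#
#     appearences['Worst Pokémon'] = (appearences['Appearances']
#                                     .rank(ascending=True, method='dense')
#                                     .astype(int))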
# Output the data
appearences = appearences.sort_values(by='Worst Pokémon', ascending=True).reset_index()
appearences = appearences[['Worst Pokémon','Evolution Group','Appearances']]
appearences.to_csv('prepped_data\\PD 2021 Wk 25 Output.csv', encoding="utf-8-sig", index=False)
print("data prepped!")
|
from __future__ import annotations
import lcs.agents.acs as acs
from lcs import Perception
from lcs.agents.acs2 import ProbabilityEnhancedAttribute
from .. import ImmutableSequence
DETAILED_PEE_PRINTING = True
class Effect(acs.Effect):
def __init__(self, observation):
# Convert dict to ProbabilityEnhancedAttribute
if not all(isinstance(attr, ProbabilityEnhancedAttribute)
for attr in observation):
observation = (ProbabilityEnhancedAttribute(attr)
if isinstance(attr, dict)
else attr
for attr in observation)
super().__init__(observation)
@property
def specify_change(self) -> bool:
"""
Checks whether there is any attribute in the effect part that
is not "pass-through" - so predicts a change.
Returns
-------
bool
True if the effect part predicts a change, False otherwise
"""
if self.is_enhanced():
return True
else:
return super().specify_change
@classmethod
def enhanced_effect(cls, effect1, effect2,
q1: float = 0.5, q2: float = 0.5,
perception: ImmutableSequence = None):
"""
Create a new enhanced effect part.
"""
assert perception is not None
result = cls(observation=effect1)
for i, attr2 in enumerate(effect2):
attr1 = effect1[i]
if attr1 == Effect.WILDCARD and attr2 == Effect.WILDCARD:
continue
if attr1 == Effect.WILDCARD:
attr1 = perception[i]
if attr2 == Effect.WILDCARD:
attr2 = perception[i]
result[i] = ProbabilityEnhancedAttribute.merged_attributes(
attr1, attr2, q1, q2)
return result
@classmethod
def item_anticipate_change(cls, item, p0_item, p1_item) -> bool:
if not isinstance(item, ProbabilityEnhancedAttribute):
            if item == cls.WILDCARD:
                # A pass-through symbol anticipates no change in this attribute
                if p0_item != p1_item:
                    return False
            else:
                # A specific symbol anticipates a change to exactly that value
                if p0_item == p1_item:
                    return False
                if item != p1_item:
                    return False
else:
if not item.does_contain(p1_item):
return False
# All checks passed
return True
def is_specializable(self, p0: Perception, p1: Perception) -> bool:
if self.is_enhanced():
return True
return super().is_specializable(p0, p1)
# @classmethod
# def for_perception_change(cls,
# p0: ImmutableSequence,
# p1: ImmutableSequence,
# cfg: ACS2Configuration):
# """
# Create an Effect that represents the change from perception p0
# to perception p1.
# """
#
# # Start with the resulting perception
# result = cls(observation=p1)
#
# # Insert wildcard characters where necessary
# for idx, eitem in enumerate(result):
# if p0[idx] == p1[idx]:
# result[idx] = cfg.classifier_wildcard
def get_best_anticipation(self, perception: Perception) -> Perception:
"""
Returns the most probable anticipation of the effect part.
This is usually the normal anticipation. However, if PEEs are
activated, the most probable value of each attribute is
taken as the anticipation.
:param perception: Perception
:return:
"""
# TODO: implement the rest after PEEs are implemented
# ('getBestChar' function)
ant = list(perception)
for idx, item in enumerate(self):
if item != self.WILDCARD:
ant[idx] = item
return Perception(ant)
def does_specify_only_changes_backwards(self,
back_anticipation: Perception,
situation: Perception) -> bool:
"""
        Returns whether the effect part specifies at least one of the percepts.
        A PEE attribute never specifies the corresponding percept.
:param back_anticipation: Perception
:param situation: Perception
:return:
"""
for item, back_ant, sit in zip(self, back_anticipation, situation):
if item == self.WILDCARD and back_ant != sit:
# change anticipated backwards although no change should occur
return False
# TODO: if PEEs are implemented, 'isEnhanced()' should be added
# to the condition below
if item != self.WILDCARD and item == back_ant:
return False
return True
def is_enhanced(self) -> bool:
"""
Checks whether any element of the Effect is Probability-Enhanced.
str elements of the Effect are not Enhanced,
ProbabilityEnhancedAttribute elements are Enhanced.
:return: True if this is a Probability-Enhanced Effect, False otherwise
"""
# Sanity check
assert not any(isinstance(elem, dict) and
not isinstance(elem, ProbabilityEnhancedAttribute)
for elem in self)
return any(isinstance(elem, ProbabilityEnhancedAttribute)
for elem in self)
def reduced_to_non_enhanced(self):
if not self.is_enhanced():
return self
result = Effect(self)
for i, elem in enumerate(result):
if isinstance(elem, ProbabilityEnhancedAttribute):
result[i] = self[i].get_best_symbol()
return result
def update_enhanced_effect_probs(self,
perception: Perception,
update_rate: float):
for i, elem in enumerate(self):
if isinstance(elem, ProbabilityEnhancedAttribute):
elem.make_compact()
effect_symbol = perception[i]
elem.increase_probability(effect_symbol, update_rate)
def __str__(self):
if DETAILED_PEE_PRINTING:
return ''.join(str(attr) for attr in self)
else:
if self.is_enhanced():
return '(PEE)' + ''.join(attr for attr
in self.reduced_to_non_enhanced())
else:
return super().__str__()
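# Illustrative truth table (not part of the original module), assuming the
# default '#' wildcard symbol: item_anticipate_change implements the usual
# ACS2 rule, where a pass-through symbol anticipates "no change" and a
# specific symbol anticipates a change to exactly that value.
#
#     Effect.item_anticipate_change('#', '1', '1')  # True:  no change expected, none happened
#     Effect.item_anticipate_change('#', '1', '2')  # False: unexpected change
#     Effect.item_anticipate_change('2', '1', '2')  # True:  anticipated change happened
#     Effect.item_anticipate_change('2', '2', '2')  # False: change anticipated but none happened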
|
from fastapi import Depends
from fastapi import FastAPI as App
from starlette.responses import Response
from benchmarks.utils import generate_dag
app = App()
def make_depends(type_: str, provider: str) -> str:
return f"{type_} = Depends({provider})"
glbls = {"Depends": Depends}
@app.router.get("/simple")
async def simple() -> Response:
"""An endpoint that does the minimal amount of work"""
return Response()
dep_without_delays = generate_dag(make_depends, glbls, 3, 2, 2, sleep=(0, 0))
@app.router.get("/fast_deps")
async def fast_dependencies(
_: int = Depends(dep_without_delays),
) -> Response:
"""An endpoint with dependencies that execute instantly"""
return Response()
# Establishing an asyncpg -> PostgreSQL connection takes ~75ms
# Running query takes about 1ms
# Hitting okta.com w/ httpx takes ~100ms
# So we'll take a range of 1ms to 100ms as delays for async dependencies
# And then make a medium sized DAG (3 levels, 2 deps per level, so 6 deps total)
dep_with_delays = generate_dag(make_depends, glbls, 3, 2, 2, sleep=(1e-3, 1e-1))
@app.router.get("/slow_deps")
async def slow_dependencies(
_: int = Depends(dep_with_delays),
) -> Response:
"""An endpoint with dependencies that simulate IO"""
return Response()
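# A minimal, hypothetical way to serve this benchmark app locally for manual
# inspection (assumes `uvicorn` is installed; not part of the benchmark itself):
if __name__ == "__main__":
    import uvicorn

    # Serve the /simple, /fast_deps and /slow_deps endpoints on localhost:8000.
    uvicorn.run(app, host="127.0.0.1", port=8000)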
|
# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the NiBabel package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
''' Header reading / writing functions for mgh image format
Author: Krish Subramaniam
'''
from os.path import splitext
import numpy as np
from nibabel.volumeutils import (array_to_file, array_from_file, Recoder)
from nibabel.spatialimages import HeaderDataError, ImageFileError, SpatialImage
from nibabel.fileholders import FileHolder, copy_file_map
from nibabel.filename_parser import types_filenames, TypesFilenamesError
from nibabel.arrayproxy import ArrayProxy
# mgh header
# See http://surfer.nmr.mgh.harvard.edu/fswiki/FsTutorial/MghFormat
DATA_OFFSET = 284
# Note that mgh data is strictly big endian ( hence the > sign )
header_dtd = [
('version', '>i4'),
('dims', '>i4', (4,)),
('type', '>i4'),
('dof', '>i4'),
('goodRASFlag', '>i2'),
('delta', '>f4', (3,)),
('Mdc', '>f4', (3, 3)),
('Pxyz_c', '>f4', (3,))
]
# Optional footer; additional tag data may optionally follow it in the file
footer_dtd = [
('mrparms', '>f4', (4,))
]
header_dtype = np.dtype(header_dtd)
footer_dtype = np.dtype(footer_dtd)
hf_dtype = np.dtype(header_dtd + footer_dtd)
# Caveat: looking up a code from bytespervox alone is ambiguous (several codes share a size)
# Caveat 2: the bytespervox values are stored as str, not int
_dtdefs = ( # code, conversion function, dtype, bytes per voxel
(0, 'uint8', '>u1', '1', 'MRI_UCHAR', np.uint8, np.dtype(np.uint8),
np.dtype(np.uint8).newbyteorder('>')),
(4, 'int16', '>i2', '2', 'MRI_SHORT', np.int16, np.dtype(np.int16),
np.dtype(np.int16).newbyteorder('>')),
(1, 'int32', '>i4', '4', 'MRI_INT', np.int32, np.dtype(np.int32),
np.dtype(np.int32).newbyteorder('>')),
(3, 'float', '>f4', '4', 'MRI_FLOAT', np.float32, np.dtype(np.float32),
np.dtype(np.float32).newbyteorder('>')))
# make full code alias bank, including dtype column
data_type_codes = Recoder(_dtdefs, fields=('code', 'label', 'dtype',
'bytespervox', 'mritype',
'np_dtype1', 'np_dtype2',
'numpy_dtype'))
class MGHError(Exception):
"""Exception for MGH format related problems.
To be raised whenever MGH is not happy, or we are not happy with
MGH.
"""
pass
class MGHHeader(object):
    '''
    Header for the MGH image format.

    The header object also carries the footer data, which the MGH format
    places after the data chunk.
    '''
# Copies of module-level definitions
template_dtype = hf_dtype
_hdrdtype = header_dtype
_ftrdtype = footer_dtype
_data_type_codes = data_type_codes
def __init__(self,
binaryblock=None,
check=True):
''' Initialize header from binary data block
Parameters
----------
binaryblock : {None, string} optional
binary block to set into header. By default, None, in
which case we insert the default empty header block
check : bool, optional
Whether to check content of header in initialization.
Default is True.
'''
if binaryblock is None:
self._header_data = self._empty_headerdata()
return
# check size
if len(binaryblock) != self.template_dtype.itemsize:
raise HeaderDataError('Binary block is wrong size')
hdr = np.ndarray(shape=(),
dtype=self.template_dtype,
buffer=binaryblock)
        # if goodRASFlag is not set, fall back to the default delta, Mdc and c_ras
if int(hdr['goodRASFlag']) < 0:
hdr = self._set_affine_default(hdr)
self._header_data = hdr.copy()
if check:
self.check_fix()
return
def __str__(self):
''' Print the MGH header object information
'''
txt = []
txt.append(str(self.__class__))
txt.append('Dims: ' + str(self.get_data_shape()))
code = int(self._header_data['type'])
txt.append('MRI Type: ' + self._data_type_codes.mritype[code])
txt.append('goodRASFlag: ' + str(self._header_data['goodRASFlag']))
txt.append('delta: ' + str(self._header_data['delta']))
txt.append('Mdc: ')
txt.append(str(self._header_data['Mdc']))
txt.append('Pxyz_c: ' + str(self._header_data['Pxyz_c']))
txt.append('mrparms: ' + str(self._header_data['mrparms']))
return '\n'.join(txt)
def __getitem__(self, item):
''' Return values from header data
'''
return self._header_data[item]
def __setitem__(self, item, value):
''' Set values in header data
'''
self._header_data[item] = value
def __iter__(self):
return iter(self.keys())
def keys(self):
''' Return keys from header data'''
return list(self.template_dtype.names)
def values(self):
''' Return values from header data'''
data = self._header_data
return [data[key] for key in self.template_dtype.names]
def items(self):
''' Return items from header data'''
return zip(self.keys(), self.values())
@classmethod
def from_header(klass, header=None, check=True):
''' Class method to create MGH header from another MGH header
'''
# own type, return copy
if type(header) == klass:
obj = header.copy()
if check:
obj.check_fix()
return obj
# not own type, make fresh header instance
obj = klass(check=check)
return obj
@classmethod
def from_fileobj(klass, fileobj, check=True):
'''
        Classmethod for loading an MGH header from a file object
'''
# We need the following hack because MGH data stores header information
# after the data chunk too. We read the header initially, deduce the
# dimensions from the header, skip over and then read the footer
# information
hdr_str = fileobj.read(klass._hdrdtype.itemsize)
hdr_str_to_np = np.ndarray(shape=(),
dtype=klass._hdrdtype,
buffer=hdr_str)
if not np.all(hdr_str_to_np['dims']):
raise MGHError('Dimensions of the data should be non-zero')
tp = int(hdr_str_to_np['type'])
fileobj.seek(DATA_OFFSET + \
int(klass._data_type_codes.bytespervox[tp]) * \
np.prod(hdr_str_to_np['dims']))
ftr_str = fileobj.read(klass._ftrdtype.itemsize)
return klass(hdr_str + ftr_str, check)
@property
def binaryblock(self):
''' binary block of data as string
Returns
-------
binaryblock : string
string giving binary data block
'''
return self._header_data.tostring()
def copy(self):
''' Return copy of header
'''
return self.__class__(self.binaryblock, check=False)
def __eq__(self, other):
''' equality between two MGH format headers
Examples
--------
>>> wstr = MGHHeader()
>>> wstr2 = MGHHeader()
>>> wstr == wstr2
True
'''
return self.binaryblock == other.binaryblock
def __ne__(self, other):
return not self == other
def check_fix(self):
        ''' No header checking or fixing is performed for now '''
pass
def get_affine(self):
''' Get the affine transform from the header information.
MGH format doesn't store the transform directly. Instead it's gleaned
from the zooms ( delta ), direction cosines ( Mdc ), RAS centers (
Pxyz_c ) and the dimensions.
'''
hdr = self._header_data
d = np.diag(hdr['delta'])
pcrs_c = hdr['dims'][:3] / 2.0
Mdc = hdr['Mdc'].T
pxyz_0 = hdr['Pxyz_c'] - np.dot(Mdc, np.dot(d, pcrs_c))
M = np.eye(4, 4)
M[0:3, 0:3] = np.dot(Mdc, d)
M[0:3, 3] = pxyz_0.T
return M
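    # Restated in matrix form (a paraphrase of the code above, not from the
    # original source): with D = diag(delta), Mdc the transpose of the stored
    # 'Mdc' field, and Pcrs_c = dims[:3] / 2 the voxel-space center,
    #
    #     M[:3, :3] = Mdc @ D
    #     M[:3,  3] = Pxyz_c - Mdc @ D @ Pcrs_c
    #
    # so M maps voxel (column, row, slice) indices to RAS mm coordinates.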
# For compatibility with nifti (multiple affines)
get_best_affine = get_affine
def get_vox2ras(self):
'''return the get_affine()
'''
return self.get_affine()
def get_vox2ras_tkr(self):
''' Get the vox2ras-tkr transform. See "Torig" here:
http://surfer.nmr.mgh.harvard.edu/fswiki/CoordinateSystems
'''
ds = np.array(self._header_data['delta'])
ns = (np.array(self._header_data['dims'][:3]) * ds) / 2.0
v2rtkr = np.array([[-ds[0], 0, 0, ns[0]],
[0, 0, ds[2], -ns[2]],
[0, -ds[1], 0, ns[1]],
[0, 0, 0, 1]], dtype=np.float32)
return v2rtkr
def get_ras2vox(self):
'''return the inverse get_affine()
'''
return np.linalg.inv(self.get_affine())
def get_data_dtype(self):
''' Get numpy dtype for MGH data
For examples see ``set_data_dtype``
'''
code = int(self._header_data['type'])
dtype = self._data_type_codes.numpy_dtype[code]
return dtype
def set_data_dtype(self, datatype):
''' Set numpy dtype for data from code or dtype or type
'''
try:
code = self._data_type_codes[datatype]
except KeyError:
raise MGHError('datatype dtype "%s" not recognized' % datatype)
self._header_data['type'] = code
def get_zooms(self):
''' Get zooms from header
Returns
-------
z : tuple
tuple of header zoom values
'''
hdr = self._header_data
zooms = hdr['delta']
return tuple(zooms[:])
def set_zooms(self, zooms):
''' Set zooms into header fields
See docstring for ``get_zooms`` for examples
'''
hdr = self._header_data
zooms = np.asarray(zooms)
if len(zooms) != len(hdr['delta']):
            raise HeaderDataError('Expecting %d zoom values'
                                  % len(hdr['delta']))
if np.any(zooms < 0):
raise HeaderDataError('zooms must be positive')
delta = hdr['delta']
delta[:] = zooms[:]
def get_data_shape(self):
''' Get shape of data
'''
dims = self._header_data['dims'][:]
# If last dimension (nframes) is 1, remove it because
# we want to maintain 3D and it's redundant
if int(dims[-1]) == 1:
dims = dims[:-1]
return tuple(int(d) for d in dims)
def set_data_shape(self, shape):
''' Set shape of data
Parameters
----------
shape : sequence
sequence of integers specifying data array shape
'''
dims = self._header_data['dims']
# If len(dims) is 3, add a dimension. MGH header always
# needs 4 dimensions.
if len(shape) == 3:
shape = list(shape)
shape.append(1)
shape = tuple(shape)
dims[:] = shape
self._header_data['delta'][:] = 1.0
def get_data_bytespervox(self):
''' Get the number of bytes per voxel of the data
'''
return int(self._data_type_codes.bytespervox[ \
int(self._header_data['type'])])
def get_data_size(self):
''' Get the number of bytes the data chunk occupies.
'''
return self.get_data_bytespervox() * np.prod(self._header_data['dims'])
def get_data_offset(self):
''' Return offset into data file to read data
'''
return DATA_OFFSET
def get_footer_offset(self):
''' Return offset where the footer resides.
Occurs immediately after the data chunk.
'''
return self.get_data_offset() + self.get_data_size()
def data_from_fileobj(self, fileobj):
''' Read data array from `fileobj`
Parameters
----------
fileobj : file-like
Must be open, and implement ``read`` and ``seek`` methods
Returns
-------
arr : ndarray
data array
'''
dtype = self.get_data_dtype()
shape = self.get_data_shape()
offset = self.get_data_offset()
return array_from_file(shape, dtype, fileobj, offset)
def get_slope_inter(self):
""" MGH format does not do scaling?
"""
return None, None
def _empty_headerdata(self):
''' Return header data for empty header
'''
dt = self.template_dtype
hdr_data = np.zeros((), dtype=dt)
hdr_data['version'] = 1
hdr_data['dims'][:] = np.array([1, 1, 1, 1])
hdr_data['type'] = 3
hdr_data['goodRASFlag'] = 1
hdr_data['delta'][:] = np.array([1, 1, 1])
hdr_data['Mdc'][0][:] = np.array([-1, 0, 0]) # x_ras
hdr_data['Mdc'][1][:] = np.array([0, 0, -1]) # y_ras
hdr_data['Mdc'][2][:] = np.array([0, 1, 0]) # z_ras
hdr_data['Pxyz_c'] = np.array([0, 0, 0]) # c_ras
hdr_data['mrparms'] = np.array([0, 0, 0, 0])
return hdr_data
def _set_format_specifics(self):
''' Set MGH specific header stuff'''
self._header_data['version'] = 1
def _set_affine_default(self, hdr):
''' If goodRASFlag is 0, return the default delta, Mdc and Pxyz_c
'''
hdr['delta'][:] = np.array([1, 1, 1])
hdr['Mdc'][0][:] = np.array([-1, 0, 0]) # x_ras
hdr['Mdc'][1][:] = np.array([0, 0, -1]) # y_ras
hdr['Mdc'][2][:] = np.array([0, 1, 0]) # z_ras
hdr['Pxyz_c'][:] = np.array([0, 0, 0]) # c_ras
return hdr
def writehdr_to(self, fileobj):
''' Write header to fileobj
Write starts at the beginning.
Parameters
----------
fileobj : file-like object
Should implement ``write`` and ``seek`` method
Returns
-------
None
'''
hdr_nofooter = np.ndarray((), dtype=self._hdrdtype,
buffer=self.binaryblock)
# goto the very beginning of the file-like obj
fileobj.seek(0)
fileobj.write(hdr_nofooter.tostring())
def writeftr_to(self, fileobj):
''' Write footer to fileobj
Footer data is located after the data chunk. So move there and write.
Parameters
----------
fileobj : file-like object
Should implement ``write`` and ``seek`` method
Returns
-------
None
'''
ftr_loc_in_hdr = len(self.binaryblock) - self._ftrdtype.itemsize
ftr_nd = np.ndarray((), dtype=self._ftrdtype,
buffer=self.binaryblock, offset=ftr_loc_in_hdr)
fileobj.seek(self.get_footer_offset())
fileobj.write(ftr_nd.tostring())
class MGHImage(SpatialImage):
header_class = MGHHeader
files_types = (('image', '.mgh'),)
_compressed_exts = (('.gz',))
ImageArrayProxy = ArrayProxy
@classmethod
def filespec_to_file_map(klass, filespec):
""" Check for compressed .mgz format, then .mgh format """
if splitext(filespec)[1] == '.mgz':
return dict(image=FileHolder(filename=filespec))
return super(MGHImage, klass).filespec_to_file_map(filespec)
@classmethod
def from_file_map(klass, file_map):
'''Load image from `file_map`
Parameters
----------
file_map : None or mapping, optional
files mapping. If None (default) use object's ``file_map``
attribute instead
'''
mghf = file_map['image'].get_prepare_fileobj('rb')
header = klass.header_class.from_fileobj(mghf)
affine = header.get_affine()
hdr_copy = header.copy()
data = klass.ImageArrayProxy(mghf, hdr_copy)
img = klass(data, affine, header, file_map=file_map)
img._load_cache = {'header': hdr_copy,
'affine': affine.copy(),
'file_map': copy_file_map(file_map)}
return img
def to_file_map(self, file_map=None):
''' Write image to `file_map` or contained ``self.file_map``
Parameters
----------
file_map : None or mapping, optional
files mapping. If None (default) use object's ``file_map``
attribute instead
'''
if file_map is None:
file_map = self.file_map
data = self.get_data()
self.update_header()
hdr = self.get_header()
with file_map['image'].get_prepare_fileobj('wb') as mghf:
self._write_header(mghf, hdr)
self._write_data(mghf, data, hdr)
self._write_footer(mghf, hdr)
self._header = hdr
self.file_map = file_map
def _write_header(self, mghfile, header):
''' Utility routine to write header
Parameters
----------
mghfile : file-like
file-like object implementing ``write``, open for writing
header : header object
'''
header.writehdr_to(mghfile)
def _write_data(self, mghfile, data, header):
''' Utility routine to write image
Parameters
----------
mghfile : file-like
file-like object implementing ``seek`` or ``tell``, and
``write``
data : array-like
array to write
header : analyze-type header object
header
'''
shape = header.get_data_shape()
if data.shape != shape:
raise HeaderDataError('Data should be shape (%s)' %
', '.join(str(s) for s in shape))
offset = header.get_data_offset()
out_dtype = header.get_data_dtype()
array_to_file(data, mghfile, out_dtype, offset)
def _write_footer(self, mghfile, header):
        ''' Utility routine to write the footer. This writes the footer data,
        which occurs after the data chunk in an MGH file
Parameters
----------
mghfile : file-like
file-like object implementing ``write``, open for writing
header : header object
'''
header.writeftr_to(mghfile)
def _affine2header(self):
""" Unconditionally set affine into the header """
hdr = self._header
shape = self._dataobj.shape
# for more information, go through save_mgh.m in FreeSurfer dist
MdcD = self._affine[:3, :3]
delta = np.sqrt(np.sum(MdcD * MdcD, axis=0))
Mdc = MdcD / np.tile(delta, (3, 1))
Pcrs_c = np.array([0, 0, 0, 1], dtype=np.float)
Pcrs_c[:3] = np.array(shape[:3]) / 2.0
Pxyz_c = np.dot(self._affine, Pcrs_c)
hdr['delta'][:] = delta
hdr['Mdc'][:, :] = Mdc.T
hdr['Pxyz_c'][:] = Pxyz_c[:3]
load = MGHImage.load
save = MGHImage.instance_to_filename
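# A minimal, hypothetical usage sketch (the file names are illustrative only):
#
#     img = load('T1.mgz')                      # .mgh and gzipped .mgz both work
#     hdr = img.get_header()
#     print(hdr.get_data_shape(), hdr.get_zooms())
#     print(img.get_affine())
#     save(img, 'T1_copy.mgh')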
|
import re
import requests
from urllib3.exceptions import InsecureRequestWarning
from actions.casLogin import casLogin
from actions.iapLogin import iapLogin
from actions.utils import Utils
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
class wiseLoginService:
    # Initialize the local login helper class
def __init__(self, userInfo, httpProxy):
if None == userInfo['username'] or '' == userInfo[
'username'] or None == userInfo['password'] or '' == userInfo[
'password'] or None == userInfo[
'schoolName'] or '' == userInfo['schoolName']:
            raise Exception('Failed to initialize: please provide complete parameters (username, password, school name)')
self.username = userInfo['username']
self.password = userInfo['password']
self.schoolName = userInfo['schoolName']
self.session = requests.session()
headers = {
'User-Agent':
'Mozilla/5.0 (Linux; Android 8.0.0; MI 6 Build/OPR1.170623.027; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/92.0.4515.131 Mobile Safari/537.36 okhttp/3.12.4',
}
self.session.headers = headers
self.session.hooks['response'].append(Utils.checkStatus)
self.session.adapters.DEFAULT_RETRIES = 5
if httpProxy != '':
            Utils.log('Global proxy enabled')
self.session.proxies = {'http': httpProxy, 'https': httpProxy}
self.login_url = ''
self.campus_host = ''
self.login_host = ''
self.loginEntity = None
self.login_type = ''
    # Look up the school's login URL via the API, using the school name
def getLoginUrlBySchoolName(self):
schools = self.session.get(
'https://mobile.campushoy.com/v6/config/guest/tenant/list',
verify=False).json()['data']
flag = False
for item in schools:
if item['name'] == self.schoolName:
flag = True
if item['joinType'] == 'NONE':
                    raise Exception(self.schoolName + ' has not joined 今日校园, please check...')
params = {'ids': item['id']}
data = self.session.get(
'https://mobile.campushoy.com/v6/config/guest/tenant/info',
params=params,
verify=False,
).json()['data'][0]
joinType = data['joinType']
ampUrl = data['ampUrl']
ampUrl2 = data['ampUrl2']
if 'campusphere' in ampUrl:
clientUrl = ampUrl
elif 'campusphere' in ampUrl2:
clientUrl = ampUrl2
else:
                    raise Exception('Client login URL not found')
res = self.session.get(clientUrl, verify=False)
self.campus_host = re.findall('\w{4,5}\:\/\/.*?\/',
clientUrl)[0]
self.login_url = res.url
self.login_host = re.findall('\w{4,5}\:\/\/.*?\/', res.url)[0]
self.login_type = joinType
break
if flag == False:
            raise Exception(self.schoolName + ' does not exist or has not joined 今日校园')
    # Decide which login method to use, based on the login URL
def checkLogin(self):
if self.login_type == 'CLOUD':
self.loginEntity = iapLogin(self.username, self.password,
self.login_url, self.login_host,
self.session)
self.session.cookies = self.loginEntity.login()
else:
self.loginEntity = casLogin(self.username, self.password,
self.login_url, self.login_host,
self.session)
self.session.cookies = self.loginEntity.login()
    # Perform the login locally
def login(self):
        # Get the school's login URL
self.getLoginUrlBySchoolName()
self.checkLogin()
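# A minimal, hypothetical usage sketch (credentials and school name are
# placeholders, not values from the original project):
if __name__ == '__main__':
    demo_user = {
        'username': 'your_student_id',
        'password': 'your_password',
        'schoolName': 'Example University',
    }
    service = wiseLoginService(demo_user, httpProxy='')
    service.login()
    print(service.login_type, service.campus_host)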
|
"""
Support for the Yahoo! Weather service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.yweather/
"""
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
TEMP_CELSIUS, CONF_MONITORED_CONDITIONS, CONF_NAME, STATE_UNKNOWN,
ATTR_ATTRIBUTION)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
REQUIREMENTS = ['yahooweather==0.10']
_LOGGER = logging.getLogger(__name__)
CONF_ATTRIBUTION = "Weather details provided by Yahoo! Inc."
CONF_FORECAST = 'forecast'
CONF_WOEID = 'woeid'
DEFAULT_NAME = 'Yweather'
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=10)
SENSOR_TYPES = {
'weather_current': ['Current', None],
'weather': ['Condition', None],
'temperature': ['Temperature', 'temperature'],
'temp_min': ['Temperature min', 'temperature'],
'temp_max': ['Temperature max', 'temperature'],
'wind_speed': ['Wind speed', 'speed'],
'humidity': ['Humidity', '%'],
'pressure': ['Pressure', 'pressure'],
'visibility': ['Visibility', 'distance'],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_WOEID, default=None): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_FORECAST, default=0):
vol.All(vol.Coerce(int), vol.Range(min=0, max=5)),
vol.Required(CONF_MONITORED_CONDITIONS, default=[]):
[vol.In(SENSOR_TYPES)],
})
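# A hypothetical configuration.yaml entry for this platform (values are
# illustrative only; the keys mirror the PLATFORM_SCHEMA above):
#
#   sensor:
#     - platform: yweather
#       woeid: "12345678"
#       forecast: 1
#       monitored_conditions:
#         - weather
#         - temperature
#         - humidity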
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Yahoo! weather sensor."""
from yahooweather import get_woeid, UNIT_C, UNIT_F
unit = hass.config.units.temperature_unit
woeid = config.get(CONF_WOEID)
forecast = config.get(CONF_FORECAST)
name = config.get(CONF_NAME)
yunit = UNIT_C if unit == TEMP_CELSIUS else UNIT_F
SENSOR_TYPES['temperature'][1] = unit
SENSOR_TYPES['temp_min'][1] = unit
SENSOR_TYPES['temp_max'][1] = unit
    # If no custom WOEID is configured, calculate it from the Home Assistant location
if woeid is None:
woeid = get_woeid(hass.config.latitude, hass.config.longitude)
if woeid is None:
_LOGGER.critical("Can't retrieve WOEID from yahoo!")
return False
yahoo_api = YahooWeatherData(woeid, yunit)
if not yahoo_api.update():
_LOGGER.critical("Can't retrieve weather data from Yahoo!")
return False
if forecast >= len(yahoo_api.yahoo.Forecast):
_LOGGER.error("Yahoo! only support %d days forecast!",
len(yahoo_api.yahoo.Forecast))
return False
dev = []
for variable in config[CONF_MONITORED_CONDITIONS]:
dev.append(YahooWeatherSensor(yahoo_api, name, forecast, variable))
add_devices(dev, True)
class YahooWeatherSensor(Entity):
"""Implementation of the Yahoo! weather sensor."""
def __init__(self, weather_data, name, forecast, sensor_type):
"""Initialize the sensor."""
self._client = name
self._name = SENSOR_TYPES[sensor_type][0]
self._type = sensor_type
self._state = STATE_UNKNOWN
self._unit = SENSOR_TYPES[sensor_type][1]
self._data = weather_data
self._forecast = forecast
self._code = None
@property
def name(self):
"""Return the name of the sensor."""
return '{} {}'.format(self._client, self._name)
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._data.yahoo.Units.get(self._unit, self._unit)
@property
def entity_picture(self):
"""Return the entity picture to use in the frontend, if any."""
if self._code is None or "weather" not in self._type:
return None
return self._data.yahoo.getWeatherImage(self._code)
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
ATTR_ATTRIBUTION: CONF_ATTRIBUTION,
}
def update(self):
"""Get the latest data from Yahoo! and updates the states."""
self._data.update()
if not self._data.yahoo.RawData:
_LOGGER.info("Don't receive weather data from Yahoo!")
return
# Default code for weather image
self._code = self._data.yahoo.Now['code']
# Read data
if self._type == 'weather_current':
self._state = self._data.yahoo.Now['text']
elif self._type == 'weather':
self._code = self._data.yahoo.Forecast[self._forecast]['code']
self._state = self._data.yahoo.Forecast[self._forecast]['text']
elif self._type == 'temperature':
self._state = self._data.yahoo.Now['temp']
elif self._type == 'temp_min':
self._code = self._data.yahoo.Forecast[self._forecast]['code']
self._state = self._data.yahoo.Forecast[self._forecast]['low']
elif self._type == 'temp_max':
self._code = self._data.yahoo.Forecast[self._forecast]['code']
self._state = self._data.yahoo.Forecast[self._forecast]['high']
elif self._type == 'wind_speed':
self._state = round(float(self._data.yahoo.Wind['speed'])/1.61, 2)
elif self._type == 'humidity':
self._state = self._data.yahoo.Atmosphere['humidity']
elif self._type == 'pressure':
self._state = round(
float(self._data.yahoo.Atmosphere['pressure'])/33.8637526, 2)
elif self._type == 'visibility':
self._state = round(
float(self._data.yahoo.Atmosphere['visibility'])/1.61, 2)
class YahooWeatherData(object):
"""Handle Yahoo! API object and limit updates."""
def __init__(self, woeid, temp_unit):
"""Initialize the data object."""
from yahooweather import YahooWeather
self._yahoo = YahooWeather(woeid, temp_unit)
@property
def yahoo(self):
"""Return Yahoo! API object."""
return self._yahoo
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from Yahoo!."""
return self._yahoo.updateWeather()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import teimedlib.pathutils as ptu
"""
scrittura comandi
uash.py dir1/dir2/cmd.py ls .
scrive in cmd.py ls .
"""
def write_cmd(lst):
name = lst[0]
cmd = " ".join(lst[1:])
# path = os.path.join(path_bin(), name)
print(name)
print(cmd)
ptu.make_dir_of_file(name)
with open(name,"w") as f:
f.write(cmd)
if len(sys.argv) > 1:
write_cmd(sys.argv[1:])
|
# Generated by Django 2.2.11 on 2020-04-02 18:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('facility', '0066_auto_20200402_1806'),
]
operations = [
migrations.AlterField(
model_name='historicalpatientregistration',
name='blood_group',
field=models.CharField(blank=True, choices=[('A+', 'A+'), ('A-', 'A-'), ('B+', 'B+'), ('B-', 'B-'), ('AB+', 'AB+'), ('AB-', 'AB-'), ('O+', 'O+'), ('O-', 'O-')], max_length=4, null=True, verbose_name='Blood Group of Patient'),
),
migrations.AlterField(
model_name='historicalpatientregistration',
name='countries_travelled',
field=models.TextField(blank=True, default='', verbose_name='Countries Patient has Travelled to'),
),
migrations.AlterField(
model_name='historicalpatientregistration',
name='date_of_return',
field=models.DateTimeField(blank=True, null=True, verbose_name='Return Date from the Last Country if Travelled'),
),
migrations.AlterField(
model_name='historicalpatientregistration',
name='disease_status',
field=models.IntegerField(blank=True, choices=[(1, 'SUSPECTED'), (2, 'POSITIVE'), (3, 'NEGATIVE'), (4, 'RECOVERY'), (5, 'RECOVERED'), (5, 'EXPIRED')], default=1, verbose_name='Disease Status'),
),
migrations.AlterField(
model_name='historicalpatientregistration',
name='number_of_aged_dependents',
field=models.IntegerField(blank=True, default=0, verbose_name='Number of people aged above 60 living with the patient'),
),
migrations.AlterField(
model_name='historicalpatientregistration',
name='number_of_chronic_diseased_dependents',
field=models.IntegerField(blank=True, default=0, verbose_name='Number of people who have chronic diseases living with the patient'),
),
migrations.AlterField(
model_name='historicalpatientregistration',
name='present_health',
field=models.TextField(blank=True, default='', verbose_name="Patient's Current Health Details"),
),
migrations.AlterField(
model_name='patientregistration',
name='blood_group',
field=models.CharField(blank=True, choices=[('A+', 'A+'), ('A-', 'A-'), ('B+', 'B+'), ('B-', 'B-'), ('AB+', 'AB+'), ('AB-', 'AB-'), ('O+', 'O+'), ('O-', 'O-')], max_length=4, null=True, verbose_name='Blood Group of Patient'),
),
migrations.AlterField(
model_name='patientregistration',
name='countries_travelled',
field=models.TextField(blank=True, default='', verbose_name='Countries Patient has Travelled to'),
),
migrations.AlterField(
model_name='patientregistration',
name='date_of_return',
field=models.DateTimeField(blank=True, null=True, verbose_name='Return Date from the Last Country if Travelled'),
),
migrations.AlterField(
model_name='patientregistration',
name='disease_status',
field=models.IntegerField(blank=True, choices=[(1, 'SUSPECTED'), (2, 'POSITIVE'), (3, 'NEGATIVE'), (4, 'RECOVERY'), (5, 'RECOVERED'), (5, 'EXPIRED')], default=1, verbose_name='Disease Status'),
),
migrations.AlterField(
model_name='patientregistration',
name='number_of_aged_dependents',
field=models.IntegerField(blank=True, default=0, verbose_name='Number of people aged above 60 living with the patient'),
),
migrations.AlterField(
model_name='patientregistration',
name='number_of_chronic_diseased_dependents',
field=models.IntegerField(blank=True, default=0, verbose_name='Number of people who have chronic diseases living with the patient'),
),
migrations.AlterField(
model_name='patientregistration',
name='present_health',
field=models.TextField(blank=True, default='', verbose_name="Patient's Current Health Details"),
),
]
|
import tensorflow as tf
import nalp.utils.logging as l
from nalp.models.generators.rmc import RMCGenerator
from nalp.models.layers.gumbel_softmax import GumbelSoftmax
logger = l.get_logger(__name__)
class GumbelRMCGenerator(RMCGenerator):
"""A GumbelRMCGenerator class is the one in charge of a generative Gumbel-based Relational Memory Core implementation.
"""
def __init__(self, encoder=None, vocab_size=1, embedding_size=32,
n_slots=3, n_heads=5, head_size=10, n_blocks=1, n_layers=3,
tau=5):
"""Initialization method.
Args:
encoder (IntegerEncoder): An index to vocabulary encoder.
vocab_size (int): The size of the vocabulary.
embedding_size (int): The size of the embedding layer.
n_slots (int): Number of memory slots.
n_heads (int): Number of attention heads.
head_size (int): Size of each attention head.
n_blocks (int): Number of feed-forward networks.
            n_layers (int): Amount of layers per feed-forward network.
tau (float): Gumbel-Softmax temperature parameter.
"""
logger.info('Overriding class: RMCGenerator -> GumbelRMCGenerator.')
# Overrides its parent class with any custom arguments if needed
super(GumbelRMCGenerator, self).__init__(encoder, vocab_size, embedding_size,
n_slots, n_heads, head_size, n_blocks, n_layers)
# Defining a property to hold the Gumbel-Softmax temperature parameter
self.tau = tau
# Creates a Gumbel-Softmax layer
self.gumbel = GumbelSoftmax(name='gumbel')
@property
def tau(self):
"""float: Gumbel-Softmax temperature parameter.
"""
return self._tau
@tau.setter
def tau(self, tau):
self._tau = tau
def call(self, x):
"""Method that holds vital information whenever this class is called.
Args:
x (tf.Tensor): A tensorflow's tensor holding input data.
Returns:
Logit-based predictions, Gumbel-Softmax outputs and predicted token.
"""
# Firstly, we apply the embedding layer
x = self.embedding(x)
# We need to apply the input into the first recurrent layer
x = self.rnn(x)
# The input also suffers a linear combination to output correct shape
x = self.linear(x)
# Lastly, we apply the Gumbel-Softmax layer
x_g, y_g = self.gumbel(x, self.tau)
return x, x_g, y_g
def generate_text(self, start, length=100, temperature=1.0):
"""Generates text by feeding to the network the
current token (t) and predicting the next token (t+1).
Args:
start (str): The start string to generate the text.
length (int): Length of generated text.
temperature (float): A temperature value to sample the token.
Returns:
A list of generated text.
"""
logger.debug(f'Generating text with length: {length} ...')
# Applying Gumbel-Softmax temperature as argument
self.tau = temperature
# Encoding the start string into tokens
start_tokens = self.encoder.encode(start)
# Expanding the first dimension of tensor
start_tokens = tf.expand_dims(start_tokens, 0)
# Creating an empty list to hold the sampled_tokens
sampled_tokens = []
# Resetting the network states
self.reset_states()
# For every possible generation
for i in range(length):
# Predicts the current token
_, preds, _ = self(start_tokens)
# Removes the first dimension of the tensor
preds = tf.squeeze(preds, 0)
# Samples a predicted token
sampled_token = tf.argmax(preds, -1)[-1].numpy()
# Put the sampled token back to the current token
start_tokens = tf.expand_dims([sampled_token], 0)
# Appends the sampled token to the list
sampled_tokens.append(sampled_token)
# Decodes the list into raw text
text = self.encoder.decode(sampled_tokens)
return text
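# A minimal, hypothetical usage sketch (the encoder and hyper-parameters are
# placeholders; see nalp's corpus/encoder utilities for real inputs):
#
#     gen = GumbelRMCGenerator(encoder=encoder, vocab_size=vocab_size,
#                              embedding_size=32, n_slots=3, n_heads=5,
#                              head_size=10, n_blocks=1, n_layers=3, tau=5)
#     text = gen.generate_text(start='hello', length=50, temperature=0.5)
#     print(''.join(text))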
|
#!/bin/env python
#
# File: PyMOLConvertLigandFileFormat.py
# Author: Manish Sud <msud@san.rr.com>
#
# Copyright (C) 2018 Manish Sud. All rights reserved.
#
# The functionality available in this script is implemented using PyMOL, a
# molecular visualization system on an open source foundation originally
# developed by Warren DeLano.
#
# This file is part of MayaChemTools.
#
# MayaChemTools is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option) any
# later version.
#
# MayaChemTools is distributed in the hope that it will be useful, but without
# any warranty; without even the implied warranty of merchantability of fitness
# for a particular purpose. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with MayaChemTools; if not, see <http://www.gnu.org/licenses/> or
# write to the Free Software Foundation Inc., 59 Temple Place, Suite 330,
# Boston, MA, 02111-1307, USA.
#
from __future__ import print_function
# Add local python path to the global path and import standard library modules...
import os
import sys; sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), "..", "lib", "Python"))
import time
import re
# PyMOL imports...
try:
import pymol
# Finish launching PyMOL in a command line mode for batch processing (-c)
# along with the following options: disable loading of pymolrc and plugins (-k);
# suppress start up messages (-q)
pymol.finish_launching(['pymol', '-ckq'])
except ImportError as ErrMsg:
sys.stderr.write("\nFailed to import PyMOL module/package: %s\n" % ErrMsg)
sys.stderr.write("Check/update your PyMOL environment and try again.\n\n")
sys.exit(1)
# MayaChemTools imports...
try:
from docopt import docopt
import MiscUtil
import PyMOLUtil
except ImportError as ErrMsg:
sys.stderr.write("\nFailed to import MayaChemTools module/package: %s\n" % ErrMsg)
sys.stderr.write("Check/update your MayaChemTools environment and try again.\n\n")
sys.exit(1)
ScriptName = os.path.basename(sys.argv[0])
Options = {}
OptionsInfo = {}
def main():
"""Start execution of the script"""
MiscUtil.PrintInfo("\n%s (PyMOL v%s; %s) Starting...\n" % (ScriptName, pymol.cmd.get_version()[1], time.asctime()))
(WallClockTime, ProcessorTime) = MiscUtil.GetWallClockAndProcessorTime()
# Retrieve command line arguments and options...
RetrieveOptions()
# Process and validate command line arguments and options...
ProcessOptions()
# Perform actions required by the script...
ConvertLigandFileFormat()
MiscUtil.PrintInfo("\n%s: Done...\n" % ScriptName)
MiscUtil.PrintInfo("Total time: %s" % MiscUtil.GetFormattedElapsedTime(WallClockTime, ProcessorTime))
def ConvertLigandFileFormat():
"""Comvert ligand file format."""
Infile = OptionsInfo["Infile"]
Outfile = OptionsInfo["Outfile"]
MiscUtil.PrintInfo("\nGenerating file %s..." % Outfile)
PyMOLUtil.ConvertFileFormat(Infile, Outfile)
if not os.path.exists(Outfile):
MiscUtil.PrintWarning("Failed to generate Outfile file, %s..." % (Outfile))
def ProcessOptions():
"""Process and validate command line arguments and options"""
MiscUtil.PrintInfo("Processing options...")
# Validate options...
ValidateOptions()
OptionsInfo["Infile"] = Options["--infile"]
OptionsInfo["Outfile"] = Options["--outfile"]
def RetrieveOptions():
"""Retrieve command line arguments and options"""
# Get options...
global Options
Options = docopt(_docoptUsage_)
# Set current working directory to the specified directory...
WorkingDir = Options["--workingdir"]
if WorkingDir:
os.chdir(WorkingDir)
# Handle examples option...
if "--examples" in Options and Options["--examples"]:
MiscUtil.PrintInfo(MiscUtil.GetExamplesTextFromDocOptText(_docoptUsage_))
sys.exit(0)
def ValidateOptions():
"""Validate option values"""
MiscUtil.ValidateOptionFilePath("-i, --infile", Options["--infile"])
MiscUtil.ValidateOptionFileExt("-i, --infile", Options["--infile"], "mol mol2 pdb")
MiscUtil.ValidateOptionFileExt("-o, --outfile", Options["--outfile"], "mol mol2 pdb")
MiscUtil.ValidateOptionsOutputFileOverwrite("-o, --outfile", Options["--outfile"], "--overwrite", Options["--overwrite"])
MiscUtil.ValidateOptionsDistinctFileNames("-i, --infile", Options["--infile"], "-o, --outfile", Options["--outfile"])
# Setup a usage string for docopt...
_docoptUsage_ = """
PyMOLConvertLigandFileFormat.py - Convert between ligand file formats
Usage:
    PyMOLConvertLigandFileFormat.py [--overwrite]
                                    [-w <dir>] -i <infile> -o <outfile>
    PyMOLConvertLigandFileFormat.py -h | --help | -e | --examples
Description:
Convert between ligand file formats.
The supported input and output file formats are: MDLMOL (.mol), MOL2 (.mol2),
and PDB (.pdb).
Options:
-e, --examples
Print examples.
-h, --help
Print this help message.
-i, --infile <infile>
Input file name.
-o, --outfile <outfile>
Output file name.
--overwrite
Overwrite existing files.
-w, --workingdir <dir>
Location of working directory which defaults to the current directory.
Examples:
To convert MDLMOL file format to MOL2 file format, type:
% PyMOLConvertLigandFileFormat.py -i caffeine.mol -o caffeine.mol2
To convert MDLMOL file format to PDB file format, type:
% PyMOLConvertLigandFileFormat.py -i caffeine.mol -o caffeine.pdb
Author:
Manish Sud(msud@san.rr.com)
See also:
PyMOLConvertPMLToPSE.py, PyMOLSplitChainsAndLigands.py,
PyMOLVisualizeMacromolecules.py
Copyright:
Copyright (C) 2018 Manish Sud. All rights reserved.
The functionality available in this script is implemented using PyMOL, a
molecular visualization system on an open source foundation originally
developed by Warren DeLano.
This file is part of MayaChemTools.
MayaChemTools is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your option) any
later version.
"""
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
################################################################
# The contents of this file are subject to the BSD 3Clause (New) License
# you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://directory.fsf.org/wiki/License:BSD_3Clause
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
# The Original Code is part of the PyRadi toolkit.
# The Initial Developer of the Original Code is CJ Willers, Copyright (C) 2006-2015
# All Rights Reserved.
# Contributor(s): ______________________________________.
################################################################
"""
For more detail see the documentation at
| ``http://nelisw.github.io/pyradi-docs/_build/html/index.html``,
| ``http://nelisw.github.io/pyradi-docs/_build/html/rytarggen.html``, or
| ``pyradi/doc/rytarggen.rst``
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
__version__= ""
__author__='CJ Willers'
__all__=['create_HDF5_image', 'hdf_Raw', 'hdf_Uniform', 'hdf_disk_photon', 'hdf_stairs',
]
import sys
import numpy as np
import scipy as sp
import scipy.signal as signal
import scipy.stats as stats
import scipy.constants as const
from scipy import interpolate
import re
import pyradi.ryfiles as ryfiles
import pyradi.ryutils as ryutils
import pyradi.ryplot as ryplot
import pyradi.ryprob as ryprob
import pyradi.ryplanck as ryplanck
######################################################################################
def assigncheck(hdf5,path,value):
"""assign a value to a path, checking for prior existence
"""
if path in hdf5:
hdf5[path][...] = value
else:
hdf5[path] = value
######################################################################################
def hdf_Uniform(imghd5,rad_dynrange):
r"""A generating function to create a uniform photon rate image.
    The uniform value in the image will be rad_dynrange.
    The equivalent image value is expressed in the same units as the input.
    The function accepts radiant or photon rate dynamic-range inputs.
This function must be called from rytarggen.create_HDF5_image
Args:
| imghd5 (handle to hdf5 file): file to which image must be added
| rad_dynrange (float): uniform/max radiance value
Returns:
| nothing: as a side effect a set of photon radiance image files are written
Raises:
| No exception is raised.
Author: CJ Willers
"""
# convert to radiance values in photon units
assigncheck(imghd5,'image/rad_dynrange', rad_dynrange * imghd5['image/conversion'][()])
assigncheck(imghd5,'image/rad_min',0.)
# imghd5['image/rad_dynrange'] = rad_dynrange * imghd5['image/conversion'][()]
# imghd5['image/rad_min'] = 0.
# create photon rate radiance image from min to min+dynamic range, with no noise
imghd5['image/PhotonRateRadianceNoNoise'][...] = \
rad_dynrange * np.ones((imghd5['image/imageSizePixels'][()]))
return imghd5
######################################################################################
def hdf_disk_photon(imghd5,rad_min,rad_dynrange,fracdiameter,fracblur):
r"""A generating function to create an image with illuminated circle with blurred boundaries.
    The function accepts radiant or photon rate minimum and dynamic range inputs.
    The equivalent image value is expressed in the same units as the input.
This function must be called from rytarggen.create_HDF5_image
Args:
| imghd5 (handle to hdf5 file): file to which image must be added
| rad_min (float): additive minimum radiance value in the image
| rad_dynrange (float): multiplicative radiance scale factor (max value)
| fracdiameter (float): diameter of the disk as fraction of minimum image size
| fracblur (float): blur of the disk as fraction of minimum image size
Returns:
| nothing: as a side effect a set of photon radiance image files are written
Raises:
| No exception is raised.
Author: CJ Willers
"""
# convert to radiance values in photon units
assigncheck(imghd5,'image/rad_dynrange',rad_dynrange * imghd5['image/conversion'][()])
assigncheck(imghd5,'image/rad_min',rad_min * imghd5['image/conversion'][()])
# scale the disk to image size, as fraction
maxSize = np.min((imghd5['image/imageSizeRows'][()], imghd5['image/imageSizeCols'][()]))
assigncheck(imghd5,'image/disk_diameter',fracdiameter * maxSize)
assigncheck(imghd5,'image/blur',fracblur * maxSize)
# imghd5['image/rad_dynrange'] = rad_dynrange * imghd5['image/conversion'][()]
# imghd5['image/rad_min'] = rad_min * imghd5['image/conversion'][()]
# # scale the disk to image size, as fraction
# maxSize = np.min((imghd5['image/imageSizeRows'][()], imghd5['image/imageSizeCols'][()]))
# imghd5['image/disk_diameter'] = fracdiameter * maxSize
# imghd5['image/blur'] = fracblur * maxSize
#create the disk, normalised to unity
varx = np.linspace(-imghd5['image/imageSizeCols'][()]/2, imghd5['image/imageSizeCols'][()]/2, imghd5['image/imageSizePixels'][()][1])
vary = np.linspace(-imghd5['image/imageSizeRows'][()]/2, imghd5['image/imageSizeRows'][()]/2, imghd5['image/imageSizePixels'][()][0])
x1, y1 = np.meshgrid(varx, vary)
delta_x = varx[1] - varx[0]
delta_y = vary[1] - vary[0]
Uin = ryutils.circ(x1,y1,imghd5['image/disk_diameter'][()])
#create blur disk normalised to unity
dia = np.max((1, 2 * round(imghd5['image/blur'][()] / np.max((delta_x,delta_y)))))
varx = np.linspace(-dia, dia, int(2 * dia))
vary = np.linspace(-dia, dia, int(2 * dia))
x, y = np.meshgrid(varx, vary)
H = ryutils.circ(x, y, dia)
# convolve disk with blur
NormLin = (np.abs(signal.convolve2d(Uin, H, mode='same'))/np.sum(H)) ** 2
# create the photon rate radiance image from min to min+dynamic range, with no noise
imghd5['image/PhotonRateRadianceNoNoise'][...] = \
(imghd5['image/rad_min'][()] + NormLin * imghd5['image/rad_dynrange'][()] )
imghd5.flush()
return imghd5
######################################################################################
def hdf_stairs(imghd5,rad_min,rad_dynrange,steps,imtype):
r"""A generating function to create a staircase image, with log/linear and prescribed step count.
The increment along stairs can be linear or logarithmic.
    The function accepts radiant or photon rate minimum and dynamic range inputs.
    The equivalent image value is expressed in lux units.
This function must be called from rytarggen.create_HDF5_image
Args:
| imghd5 (handle to hdf5 file): file to which image must be added
| rad_min (float): additive minimum radiance value in the image
| rad_dynrange (float): radiance multiplicative scale factor (max value)
| steps (int): number of steps in the image
| imtype (string): string to define the type of image to be created ['stairslin','stairslog']
Returns:
| nothing: as a side effect a set of photon radiance image files are written
Raises:
| No exception is raised.
Author: CJ Willers
"""
# convert to radiance values in photon units
assigncheck(imghd5,'image/rad_dynrange',rad_dynrange * imghd5['image/conversion'][()])
assigncheck(imghd5,'image/rad_min',rad_min * imghd5['image/conversion'][()])
assigncheck(imghd5,'image/steps',steps)
assigncheck(imghd5,'image/imtype',imtype)
# imghd5['image/rad_dynrange'] = rad_dynrange * imghd5['image/conversion'][()]
# imghd5['image/rad_min'] = rad_min * imghd5['image/conversion'][()]
# imghd5['image/steps'] = steps
# imghd5['image/imtype'] = imtype
#Create the stairs spatial definition
size = imghd5['image/imageSizePixels'][()][1]
if imtype in ['stairslin']:
varx = np.linspace(0,size-1,size)
else:
varx = np.logspace(-1,np.log10(size-1),size)
varx = ((varx/(size/steps)).astype(int)).astype(float) / steps
varx = varx / np.max(varx)
vary = np.linspace( - imghd5['image/imageSizeRows'][()]/2,
imghd5['image/imageSizeRows'][()]/2,
imghd5['image/imageSizePixels'][()][0])
vary = np.where(np.abs(vary)<imghd5['image/imageSizeRows'][()]/3.,1.,0.)
x, y = np.meshgrid(varx,vary)
NormLin = y * x * np.ones(x.shape)
# create the photon rate radiance image from min to min+dynamic range, with no noise
imghd5['image/PhotonRateRadianceNoNoise'][...] = \
(imghd5['image/rad_min'][()] + NormLin * imghd5['image/rad_dynrange'][()] )
imghd5.flush()
return imghd5
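# Worked example of the staircase quantisation above (illustrative, not from the original
# source): with size=8 columns and steps=4, stairslin gives
#   varx = [0,1,...,7] -> varx/(size/steps) = [0,0.5,...,3.5] -> int -> [0,0,1,1,2,2,3,3]
#   -> /steps -> [0,0,0.25,0.25,0.5,0.5,0.75,0.75] -> /max -> [0,0,1/3,1/3,2/3,2/3,1,1],
# i.e. four equal-width intensity steps across the columns, while vary keeps only the
# central band of rows (|y| < imageSizeRows/3) at unity.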
######################################################################################
def hdf_Raw(imghd5,filename,inputSize,outputSize,rad_min=-1,rad_dynrange=-1, imgNum=0,
inputOrigin=[0,0],blocksize=[1,1],sigma=0):
r"""A generating function to create a photon rate image from raw image.
The output image is extracted from the raw image, with blocks of raw image
pixels averaged to single output image pixels. inputOrigin (lowest row,col values)
defines from where in the raw input image the slicing takes place. blocksize defines
how many raw image pixels must be averaged/aggregated together to define a single
    output image pixel; resolution is lowered by this factor. sigma is the kernel
    size used in scipy.ndimage.gaussian_filter.
    The subsampled image will be rescaled to the range [rad_min, rad_min + rad_dynrange].
The raw image sequence must be of type np.float64 with no header or footer.
    The function accepts radiant or photon rate minimum and dynamic range inputs.
    The equivalent image value is expressed in the same units as the output image.
This function must be called from rytarggen.create_HDF5_image
Args:
| imghd5 (handle to hdf5 file): file to which image must be added
| filename (string): Raw file filename, data must be np.float64
| rad_min (float): additive minimum radiance value in the image, -1 to not use scaling
| inputSize ([int,int]): raw image size, number of rows,cols
| outputSize ([int,int]): size of the output image row,cols
| rad_dynrange (float): multiplicative radiance scale factor (max value), -1 to not use scaling
| imgNum (int): image number to be loaded from the image sequence
| inputOrigin ([int,int]): raw image row,col where the image must be extracted from
| blocksize ([int,int]): row,col blocksize in raw image to be averaged to single output pixel
| sigma (float): gaussian spatial filter kernel rms size in raw image pixels
Returns:
| nothing: as a side effect a set of photon radiance image files are written
Raises:
| No exception is raised.
Author: CJ Willers
"""
# print(filename,inputSize,outputSize,rad_min,rad_dynrange, imgNum,inputOrigin,blocksize,sigma)
assigncheck(imghd5,'image/rad_dynrange',rad_dynrange * imghd5['image/conversion'][()])
assigncheck(imghd5,'image/rad_min',rad_min * imghd5['image/conversion'][()])
assigncheck(imghd5,'image/filename',filename)
# imghd5['image/rad_dynrange'] = rad_dynrange * imghd5['image/conversion'][()]
# imghd5['image/rad_min'] = rad_min * imghd5['image/conversion'][()]
# imghd5['image/filename'] = filename
# read the imgNum'th raw image frame from file
nfr,img = ryfiles.readRawFrames(filename, rows=inputSize[0], cols=inputSize[1],
vartype=np.float64, loadFrames=[imgNum])
# print(nfr,img.shape)
if nfr > 0:
#extract the smaller raw image and coalesce/blur
img = ryutils.blurryextract(img[0,:,:], inputOrigin=inputOrigin,
outputSize=outputSize,
sigma=sigma, blocksize=blocksize)
# save the original input image
imghd5['image/equivalentSignal'][...] = img
img = img / imghd5['image/joule_per_photon'][()]
if imghd5['image/rad_min'][()] < 0. and imghd5['image/rad_dynrange'][()] < 0.:
# don't scale the input image
# create photon rate radiance image from input, no scaling, with no noise
PhotonRateRadianceNoNoise = img
else:
# scale the input image
NormLin = (img - np.min(img)) / (np.max(img)- np.min(img))
# create photon rate radiance image from min to min+dynamic range, with no noise
PhotonRateRadianceNoNoise = imghd5['image/rad_min'][()] \
+ NormLin * imghd5['image/rad_dynrange'][()]
else:
print('Unknown image type or file not successfully read: {}\n no image file created'.format(filename))
return imghd5
# save the no noise image
imghd5['image/PhotonRateRadianceNoNoise'][...] = PhotonRateRadianceNoNoise
return imghd5
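# Illustrative parameter choice for hdf_Raw (added comment, values are assumptions):
# with inputSize=[512,512], inputOrigin=[100,100], outputSize=[128,128] and
# blocksize=[2,2], each output pixel is the average of a 2x2 block of raw pixels,
# so raw rows/cols 100..355 are consumed to build the 128x128 output image.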
######################################################################################
def create_HDF5_image(imageName, numPixels, fn, kwargs, wavelength,
saveNoiseImage=False,saveEquivImage=False,
equivalentSignalType='',equivalentSignalUnit='', LinUnits='', seedval=0,fintp=None,
fileHandle=None,noSpaces=False):
r"""This routine serves as calling function to a generating function to create images.
This function expects that the calling function will return photon rate images,
irrespective of the units of the min/max values used to create the image.
Each generating function creates an image of a different type, taking as input
radiant, photon rate, temperature or some other unit, as coded in the generating function.
    If fileHandle is None the file is created anew; if fileHandle is not None it is used
    as an existing file handle.
    This calling function sets up the image and writes common information and then calls the
    generating function to add the specific image type with the required radiometric units.
    The generating function and its arguments must be given as arguments on this function's
    argument list.
The image file is in HDF5 format, containing the
* input parameters to the image creation process
* the image in photon rate units without photon noise
* the image in photon rate units with photon noise
    * the image in some equivalent input unit (radiant, photometric or photon rate units).
    The general procedure in the generating function is to convert the radiance
    input values in units [W/m2] or [q/(m2.s)] to photon rate radiance in units [q/(m2.s)]
    by dividing by the energy of one photon at the stated wavelength, given by
:math:`Q_p=\frac{h\cdot c}{\lambda}`,
where :math:`\lambda` is wavelength, :math:`h` is Planck's constant and :math:`c` is
the speed of light. The conversion is done at a single wavelength, which is not very accurate.
The better procedure is to create the photon rate image directly using a spectral integral.
The following minimum HDF5 entries are required by pyradi.rystare:
| ``'image/imageName'`` (string): the image name
| ``'image/PhotonRateRadianceNoNoise'`` np.array[M,N]: a float array with the image pixel values no noise
| ``'image/PhotonRateRadiance'`` np.array[M,N]: a float array with the image pixel values with noise
| ``'image/imageSizePixels'``: ([int, int]): number of pixels [row,col]
| ``'image/imageFilename'`` (string): the image file name
    | ``'image/wavelength'`` (float): wavelength at which photon rate calcs are done
| ``'image/imageSizeRows'`` (int): the number of image rows
| ``'image/imageSizeCols'`` (int): the number of image cols
| ``'image/imageSizeDiagonal'`` (float): the FPA diagonal size in mm
| ``'image/equivalentSignal'`` (float): the equivalent input signal, e.g. temperature or lux (optional)
| ``'image/irradianceWatts'`` (float): the exitance in the image W/m2 (optional)
| ``'image/temperature'`` (float): the maximum target temperature in the image K (optional)
    A few minimum entries are required, but you can add any information you wish to the generating
function, by adding the additional information to the generating function's kwargs.
Args:
| imageName (string/hdffile): the image name, used to form the filename.
| numPixels ([int, int]): number of pixels [row,col].
| fn (Python function): the generating function to be used to calculate the image.
| kwargs (dictionary): kwargs to the passed to the generating function.
| wavelength (float): wavelength where photon rate calcs are done in [m]
| equivalentSignalType (str): type of the equivalent input scale (e.g., irradiance, temperature)
| equivalentSignalUnit (str): units of the equivalent scale (e.g., W/m2, K, lux)
| LinUnits (str): Lin units and definition separated with : (e.g., 'W/(m2.sr)', 'q/(s.m2.sr)')
| seedval (int): a seed for the photon noise generator
| saveNoiseImage (bool): save the noisy image to HDF5 file
| saveEquivImage (bool): save the equivalent image to HDF5 file
| fintp (function or str): interpolation function to map from radiance to equivalent unit,
        | if the string 'original', then keep the original input image written by hdf_Raw()
        | fileHandle (filehandle): if None create a new file, otherwise use the given handle
| noSpaces (bool): if True replace all spaces and decimals in filename with '-'
Returns:
| string/hdffile (string): hdf5 filename or open file
| : as a side effect an image file is written
Raises:
| No exception is raised.
Author: CJ Willers
"""
# # see if the input is a string
# inpstr = False
# if sys.version_info[0] > 2:
# inpstr = isinstance(imageName, str)
# else:
# inpstr = isinstance(imageName, basestring)
hdffilename = 'image-{}-{}-{}'.format(imageName, numPixels[0], numPixels[1])
if noSpaces:
hdffilename = hdffilename.replace(' ','-')
hdffilename = hdffilename.replace('.','-')
hdffilename = '{}.hdf5'.format(hdffilename)
if fileHandle is None:
imghd5 = ryfiles.erase_create_HDF(hdffilename)
else:
imghd5 = fileHandle
assigncheck(imghd5,'image/imageName',imageName)
assigncheck(imghd5,'image/imageSizePixels',numPixels)
assigncheck(imghd5,'image/imageSizeRows',numPixels[0])
assigncheck(imghd5,'image/imageSizeCols',numPixels[1])
assigncheck(imghd5,'image/imageFilename',hdffilename)
assigncheck(imghd5,'image/equivalentSignalType',equivalentSignalType)
assigncheck(imghd5,'image/equivalentSignalUnit',equivalentSignalUnit)
assigncheck(imghd5,'image/LinUnits',LinUnits)
assigncheck(imghd5,'image/saveNoiseImage',saveNoiseImage)
assigncheck(imghd5,'image/saveEquivImage',saveEquivImage)
# imghd5['image/imageName'] = imageName
# imghd5['image/imageSizePixels'] = numPixels
# imghd5['image/imageSizeRows'] = numPixels[0]
# imghd5['image/imageSizeCols'] = numPixels[1]
# imghd5['image/imageFilename'] = hdffilename
# imghd5['image/equivalentSignalType'] = equivalentSignalType
# imghd5['image/equivalentSignalUnit'] = equivalentSignalUnit
# imghd5['image/LinUnits'] = LinUnits
# imghd5['image/saveNoiseImage'] = saveNoiseImage
# imghd5['image/saveEquivImage'] = saveEquivImage
if 'image/equivalentSignal' not in imghd5:
dset = imghd5.create_dataset('image/equivalentSignal', numPixels, dtype='float', compression="gzip")
if 'image/PhotonRateRadianceNoNoise' not in imghd5:
dset = imghd5.create_dataset('image/PhotonRateRadianceNoNoise', numPixels, dtype='float', compression="gzip")
if 'image/PhotonRateRadiance' not in imghd5:
dset = imghd5.create_dataset('image/PhotonRateRadiance', numPixels, dtype='float', compression="gzip")
#photon rate radiance in the image ph/(m2.s), with no photon noise, will be filled by rendering function
imghd5['image/PhotonRateRadianceNoNoise'][...] = \
np.zeros((imghd5['image/imageSizePixels'][()][0],imghd5['image/imageSizePixels'][()][1]))
assigncheck(imghd5, 'image/wavelength',wavelength)
# imghd5['image/wavelength'] = wavelength
# use units to determine if photon rate or watts
# joule/photon factor to convert between W/m2 and q/(s.m2)
if isinstance( imghd5['image/wavelength'][()], float):
assigncheck(imghd5,'image/joule_per_photon',const.h * const.c / imghd5['image/wavelength'][()])
# imghd5['image/joule_per_photon'] = const.h * const.c / imghd5['image/wavelength'][()]
else:
assigncheck(imghd5,'image/joule_per_photon',const.h * const.c / np.mean(imghd5['image/wavelength'][()]))
# imghd5['image/joule_per_photon'] = const.h * const.c / np.mean(imghd5['image/wavelength'][()])
conversion = 1.0 if 'q/' in imghd5['image/LinUnits'][()][:3] \
else 1. / imghd5['image/joule_per_photon'][()]
assigncheck(imghd5,'image/conversion',conversion)
# imghd5['image/conversion'] = conversion
kwargs['imghd5'] = imghd5
# call the function that actually generates the image
imghd5 = fn(**kwargs)
# add photon noise in the signal
if imghd5['image/saveNoiseImage'][()]:
imghd5['image/PhotonRateRadiance'][...] = \
ryutils.poissonarray(imghd5['image/PhotonRateRadianceNoNoise'][()], seedval=seedval)
# save equivalent signal
if imghd5['image/saveEquivImage'][()]:
if fintp is None:
# save nonoise image as equivalent signal
imghd5['image/equivalentSignal'][...] = imghd5['image/PhotonRateRadianceNoNoise'][()]
else:
if isinstance(fintp, str): # if string, keep the value written by hdf_raw
pass
else:
# save equivalent signal (e.g., temperature or lux), by interpolation
imghd5['image/equivalentSignal'][...] = fintp(imghd5['image/PhotonRateRadianceNoNoise'][()])
# save the interpolation function to hdf5
assigncheck(imghd5,'image/interpolate_x',fintp.x)
assigncheck(imghd5,'image/interpolate_y',fintp.y)
# imghd5['image/interpolate_x'] = fintp.x
# imghd5['image/interpolate_y'] = fintp.y
imghd5.flush()
imghd5.close()
return hdffilename
######################################################################################
def analyse_HDF5_image(imghd5,plotfile,gwidh=12,gheit=8):
r"""Summarise the image properties and statistics
Args:
| imghd5 (handle to an open hdf5 file): file to be analysed
| plotfile(string): filename for plot graphics
| gwidh (float): graph width in inches
| gheit (float): graph height in inches
Returns:
| nothing: as a side effect a set properties are written and graphs created
Raises:
| No exception is raised.
Author: CJ Willers
"""
from scipy import stats
import pyradi.ryplot
#calculate and display values of these variables
elements = ['image/imageFilename','image/imageName','image/filename','image/rad_dynrange',
        'image/rad_min','image/irrad_dynrange','image/irrad_min','image/disk_diameter','image/blur',
        'image/steps','image/imtype','image/imageSizePixels','image/pixelPitch',
'image/imageSizeRows','image/imageSizeCols','image/imageSizeDiagonal',
'image/equivalentSignalType','image/equivalentSignalUnit','image/LinUnits','image/EinUnits',
'image/saveNoiseImage','image/saveEquivImage','image/joule_per_photon',
'image/conversion',
]
for item in elements:
if item in imghd5:
print('{:30s} : {}'.format(item,imghd5[item][()]))
# wavelength as scalar or vector
print(imghd5)
if isinstance( imghd5['image/wavelength'][()], float):
print('{:30s} : {}'.format('wavelength',imghd5['image/wavelength'][()]))
else:
print('{:30s} : {}'.format('wavelength (mean)',np.mean(imghd5['image/wavelength'][()])))
#calculate and display statistics of these variables
elements = ['image/PhotonRateRadianceNoNoise','image/PhotonRateRadiance',
'image/PhotonRateIrradianceNoNoise','image/PhotonRateIrradiance','image/equivalentSignal'
]
for item in elements:
if item in imghd5:
print('\nStatistics for {}:'.format(item))
print(stats.describe(imghd5[item][()],axis=None))
# plot the images
p = ryplot.Plotter(1,3,1,plotfile, figsize=(gwidh,gheit),doWarning=False)
for item in ['image/PhotonRateRadianceNoNoise','image/PhotonRateIrradianceNoNoise']:
if item in imghd5:
p.showImage(1,imghd5[item][()],item,cbarshow=True)
for item in ['image/PhotonRateRadiance','image/PhotonRateIrradiance']:
if item in imghd5:
p.showImage(2,imghd5[item][()],item,cbarshow=True)
if 'image/equivalentSignal' in imghd5:
p.showImage(3,imghd5['image/equivalentSignal'][()],'image/equivalentSignal',cbarshow=True)
p.saveFig('{}.png'.format(plotfile))
# plot interpolation function
if 'image/interpolate_x' in imghd5:
q = ryplot.Plotter(1,1,1,plotfile, figsize=(12,6),doWarning=False)
q.plot(1,imghd5['image/interpolate_x'][()],imghd5['image/interpolate_y'][()])
q.saveFig('{}-lookup.png'.format(plotfile))
print(50*'='+'\n\n')
######################################################################################
def analyse_HDF5_imageFile(hdffilename,gwidh=12,gheit=8):
r"""Summarise the image properties and statistics
Args:
        | hdffilename (string): name of the hdf5 file to be analysed
| gwidh (float): graph width in inches
| gheit (float): graph height in inches
Returns:
| nothing: as a side effect a set properties are written and graphs created
Raises:
| No exception is raised.
Author: CJ Willers
"""
imghd5 = ryfiles.open_HDF(hdffilename)
analyse_HDF5_image(imghd5,plotfile=hdffilename[:-5],gwidh=gwidh,gheit=gheit)
imghd5.close()
######################################################################################
def calcTemperatureEquivalent(wavelength,sysresp,tmin,tmax):
"""Calc the interpolation function between temperature and photon rate radiance
Args:
| wavelength (np.array): wavelength vector
| sysresp (np.array): system response spectral vector
| tmin (float): minimum temperature in lookup table
| tmax (float): maximum temperature in lookup table
Returns:
        | fintpLE, fintpEL: interpolation functions mapping radiance to temperature and temperature to radiance
Raises:
| No exception is raised.
Author: CJ Willers
"""
wavelength = wavelength.reshape(-1, 1)
sysresp = sysresp.reshape(-1, 1)
temp = np.linspace(0.99*float(tmin), 1.01*float(tmax), 100).reshape(-1,1)
# radiance in q/(s.m2)
rad = np.trapz(sysresp * ryplanck.planck(wavelength, temp,
type='ql'),wavelength, axis=0).reshape(-1,1) / np.pi
fintpLE = interpolate.interp1d(rad.reshape(-1,), temp.reshape(-1,))
fintpEL = interpolate.interp1d(temp.reshape(-1,), rad.reshape(-1,))
return fintpLE,fintpEL
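# Example use of calcTemperatureEquivalent (illustrative only; the band and temperatures
# below are assumptions):
#   wl = np.linspace(3.4, 4.9, 100)
#   fintpLE, fintpEL = calcTemperatureEquivalent(wl, np.ones(wl.shape), 280, 320)
#   L300 = fintpEL(300.)   # photon rate radiance in q/(s.m2.sr) at 300 K
#   T300 = fintpLE(L300)   # maps back to approximately 300 K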
######################################################################################
def calcLuxEquivalent(wavelength,rad_min,rad_dynrange,units):
"""Calc the interpolation function between lux and photon rate radiance
    Assuming a single-wavelength colour, the specified wavelength value is used to
    calculate the equivalent lux image for the radiance input range.
    Args:
        | wavelength (float): wavelength at which the conversion is done
        | rad_min (float): minimum radiance (in input units) for the lookup table
        | rad_dynrange (float): radiance dynamic range (in input units) for the lookup table
| units (string): input radiance units q/s or W
Returns:
| interpolation function
Raises:
| No exception is raised.
Author: CJ Willers
"""
if 'q' in units:
conversion = wavelength / (const.h * const.c)
else:
conversion = 1.
Wm2tolux = 683 * 1.019 * np.exp(-285.51 * (wavelength*1e6 - 0.5591)**2)
# convert from q/s to W
rad_minW = rad_min / conversion
rad_dynrangeW = rad_dynrange / conversion
radW = np.linspace(0.99*rad_minW, 1.01*(rad_minW+rad_dynrangeW), 1000)
lux = Wm2tolux * radW
# convert from W back to q/s when setting up the function
fintp = interpolate.interp1d((radW*wavelength / (const.h * const.c)).reshape(-1), lux.reshape(-1))
return fintp
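# Example use of calcLuxEquivalent (illustrative only; the values are assumptions matching
# the 'Stairslin' examples in __main__ below). The returned function expects photon rate
# radiance, so a radiance given in W/(m2.sr) is first divided by the photon energy:
#   fintp = calcLuxEquivalent(0.55e-6, 9.659e-4, 0.483, 'W/(m2.sr)')
#   lux = fintp(0.1 * 0.55e-6 / (const.h * const.c))   # 0.1 W/(m2.sr) expressed as q/(s.m2.sr)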
################################################################
################################################################
##
if __name__ == '__init__':
pass
if __name__ == '__main__':
import os.path
import pyradi.ryfiles as ryfiles
import pyradi.ryutils as ryutils
doAll = False
numPixels = [256, 256] # [ROW, COLUMN] size
wavelength = 0.55e-6
#---------- create test images ---------------------
if True:
#create a zero uniform photon rate image
# input in q/(s.m2), output in q/(s.m2), equivalent in q/(s.m2) units
filename = create_HDF5_image(imageName='Zero',
numPixels=numPixels,wavelength=wavelength,
saveNoiseImage=True,saveEquivImage=True,
fn=hdf_Uniform, kwargs={'rad_dynrange':0},
equivalentSignalType='Irradiance',equivalentSignalUnit='q/(s.m2.sr)',
LinUnits='q/(s.m2.sr)', seedval=0,fintp=None )
analyse_HDF5_imageFile(filename)
#create a uniform photon rate image with nonzero value, from radiance input
# input in q/(s.m2), output in q/(s.m2), equivalent in q/(s.m2) units
filename = create_HDF5_image(imageName='Uniform',
numPixels=numPixels,wavelength=wavelength,
saveNoiseImage=True,saveEquivImage=True,
fn=hdf_Uniform, kwargs={'rad_dynrange':1.3e17},
equivalentSignalType='Irradiance',equivalentSignalUnit='q/(s.m2.sr)',
LinUnits='q/(s.m2.sr)', seedval=0,fintp=None )
analyse_HDF5_imageFile(filename)
# create a disk photon rate image, scaled from unity base, by min + dynamic range
# input in q/(s.m2), output in q/(s.m2), equivalent in q/(s.m2) units
filename = create_HDF5_image(imageName='Disk',
numPixels=numPixels,wavelength=wavelength,
saveNoiseImage=True,saveEquivImage=True,
fn=hdf_disk_photon, kwargs={'rad_min':0.0,'rad_dynrange':1.3e17,
'fracdiameter':0.7,'fracblur':0.2},
equivalentSignalType='Irradiance',equivalentSignalUnit='q/(s.m2.sr)',
LinUnits='q/(s.m2.sr)', seedval=0,fintp=None )
analyse_HDF5_imageFile(filename)
# create stair photon rate image, scaled from unity base, by min + dynamic range
# input in W/m2, output in q/(s.m2), equivalent in lux units
rad_min = 9.659e-4
rad_dynrange = 0.483
LinUnits = 'W/(m2.sr)'
fintp = calcLuxEquivalent(wavelength,rad_min,rad_dynrange,LinUnits)
filename = create_HDF5_image(imageName='Stairslin-10',
numPixels=[250, 250], wavelength=wavelength,
saveNoiseImage=True,saveEquivImage=True,
fn=hdf_stairs, kwargs={'rad_min':rad_min,'rad_dynrange':rad_dynrange,
'imtype':'stairslin','steps':10},
equivalentSignalType='Irradiance',equivalentSignalUnit='lux',
LinUnits=LinUnits, seedval=0,fintp=fintp )
analyse_HDF5_imageFile(filename)
# create stair photon rate image, scaled from unity base, by min + dynamic range
# input in W/m2, output in q/(s.m2), equivalent in lux units
rad_min = 9.659e-4
rad_dynrange = 0.483
LinUnits = 'W/(m2.sr)'
fintp = calcLuxEquivalent(wavelength,rad_min,rad_dynrange,LinUnits)
filename = create_HDF5_image(imageName='Stairslin-40',
numPixels=[100,520],wavelength=wavelength,
saveNoiseImage=True,saveEquivImage=True,
fn=hdf_stairs, kwargs={'rad_min':rad_min,'rad_dynrange':rad_dynrange,
'imtype':'stairslin','steps':40},
equivalentSignalType='Irradiance',equivalentSignalUnit='lux',
LinUnits=LinUnits, seedval=0,fintp=fintp )
analyse_HDF5_imageFile(filename)
# create stair photon rate image, scaled from unity base, by min + dynamic range
# low light level input in W/m2, output in q/(s.m2), equivalent in lux units
rad_min =9.659e-6
rad_dynrange = 4.829e-3
LinUnits = 'W/(m2.sr)'
fintp = calcLuxEquivalent(wavelength,rad_min,rad_dynrange,LinUnits)
filename = create_HDF5_image(imageName='Stairslin-LowLight-40',
numPixels=[100,520],wavelength=wavelength,
saveNoiseImage=True,saveEquivImage=True,
fn=hdf_stairs, kwargs={'rad_min':rad_min,'rad_dynrange':rad_dynrange,
'imtype':'stairslin','steps':40},
equivalentSignalType='Irradiance',equivalentSignalUnit='lux',
LinUnits='W/(m2.sr)', seedval=0,fintp=fintp )
analyse_HDF5_imageFile(filename)
# create photon rate image from raw, unscaled
filename = create_HDF5_image(imageName='PtaInd-13Dec14h00X',
numPixels=[512,512],wavelength=4.5e-6,
saveNoiseImage=True,saveEquivImage=True,
fn=hdf_Raw, kwargs={'filename':'data/PtaInd-13Dec14h00X.bin',
'inputSize':[512,512],'outputSize':[512,512],
'rad_min':-1,'rad_dynrange':-1,'imgNum':0},
equivalentSignalType='Irradiance',equivalentSignalUnit='W/m2',
LinUnits='W/(m2.sr)', seedval=0,fintp=None )
analyse_HDF5_imageFile(filename)
# def hdf_Raw(imghd5,filename,inputSize,outputSize,rad_min=-1,rad_dynrange=-1, imgNum=0,
# inputOrigin=[0,0],blocksize=[1,1],sigma=0):
# create photon rate image from raw, unscaled
filename = create_HDF5_image(imageName='StairIR-raw',
numPixels=[100,256],wavelength=4.5e-6,
saveNoiseImage=True,saveEquivImage=True,
fn=hdf_Raw, kwargs={'filename':'data/StairIR-raw.double',
'inputSize':[100,256],'outputSize':[100,256],
'rad_min':-1,'rad_dynrange':-1,'imgNum':0},
equivalentSignalType='Irradiance',equivalentSignalUnit='W/m2',
LinUnits='W/(m2.sr)', seedval=0,fintp=None )
analyse_HDF5_imageFile(filename)
#create an infrared image with lin stairs
# work in temperature
tmin = 293 # 20 deg C at minimum level
tmax = 313 # 40 deg C at maximum level
# do a wideband spectral integral
wavelength = np.linspace(3.4,4.9,100)
sysresp = np.ones(wavelength.shape)
fintpLE,fintpEL = calcTemperatureEquivalent(wavelength,sysresp,tmin,tmax)
filename = create_HDF5_image(imageName='StairslinIR-40',
numPixels=[100,520],wavelength=wavelength,
saveNoiseImage=True,saveEquivImage=True,
fn=hdf_stairs, kwargs={'rad_min':fintpEL(tmin),
'rad_dynrange':fintpEL(tmax) -fintpEL(tmin),
'imtype':'stairslin','steps':40},
equivalentSignalType='Temperature',equivalentSignalUnit='K',
LinUnits='q/(s.m2.sr)', seedval=0,fintp=fintpLE )
analyse_HDF5_imageFile(filename,15,7)
#create a scaled infrared image derived from raw input image
# use temperatures to define min and max values to which the
# raw input image is scaled: minImg<->tmin and maxImg<->tmax
tmin = 280 # K at minimum level
tmax = 320 # K at maximum level
# do a wideband spectral integral
wavelength = np.linspace(3.7,4.8,100)
sysresp = np.ones(wavelength.shape)
fintpLE,fintpEL = calcTemperatureEquivalent(wavelength,sysresp,tmin,tmax)
# create photon rate image from raw, scaled
filename = create_HDF5_image(imageName='PtaInd-13Dec14h00XScaled',
numPixels=[512,512],wavelength=4.5e-6,
saveNoiseImage=True,saveEquivImage=True,
fn=hdf_Raw, kwargs={'filename':'data/PtaInd-13Dec14h00X.bin',
'inputSize':[512,512],'outputSize':[512,512],
'rad_min':fintpEL(tmin),'rad_dynrange':fintpEL(tmax) -fintpEL(tmin),
'imgNum':0},
equivalentSignalType='Temperature',equivalentSignalUnit='K',
LinUnits='q/(s.m2.sr)', seedval=0,fintp=fintpLE )
analyse_HDF5_imageFile(filename,15,7)
#create a uniform photon rate image with nonzero value, for given temperature
# input in q/(s.m2), output in q/(s.m2), equivalent in q/(s.m2) units
tuniform = 295
# do a wideband spectral integral
wavelength = np.linspace(3.7,4.8,100)
sysresp = np.ones(wavelength.shape)
fintpLE,fintpEL = calcTemperatureEquivalent(wavelength,sysresp,tuniform-5,tuniform+5)
# create photon rate image from raw, scaled
filename = create_HDF5_image(imageName='Uniform{:.0f}K'.format(tuniform),
numPixels=numPixels,wavelength=wavelength,
saveNoiseImage=True,saveEquivImage=True,
fn=hdf_Uniform, kwargs={'rad_dynrange':fintpEL(tuniform)},
equivalentSignalType='Temperature',equivalentSignalUnit='K',
LinUnits='q/(s.m2.sr)', seedval=0,fintp=fintpLE )
analyse_HDF5_imageFile(filename)
print('module rytarggen done!')
|
"""StockAnalysis View"""
__docformat__ = "numpy"
# pylint:disable=too-many-arguments, too-many-lines
import copy
import logging
import os
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from gamestonk_terminal import feature_flags as gtff
from gamestonk_terminal.config_plot import PLOT_DPI
from gamestonk_terminal.decorators import log_start_end
from gamestonk_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
)
from gamestonk_terminal.rich_config import console
from gamestonk_terminal.stocks.sector_industry_analysis import stockanalysis_model
from gamestonk_terminal.stocks.sector_industry_analysis.financedatabase_model import (
filter_stocks,
)
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_plots_financials(
finance_key: str,
sa_dict: dict,
country: str,
sector: str,
industry: str,
period: str,
period_length: int,
marketcap: str = "",
exclude_exchanges: bool = True,
limit: int = 10,
export: str = "",
raw: bool = False,
already_loaded_stocks_data=None,
):
"""Display financials bars comparing sectors, industry, analysis, countries, market cap and excluding exchanges.
Parameters
----------
    finance_key: str
        Select finance key from StockAnalysis (e.g. re (Revenue), ce (Cash & Equivalents) and inv (Inventory))
    sa_dict: dict
        The entire collection of options for StockAnalysis separated by statement (BS, IS and CF)
country: str
Search by country to find stocks matching the criteria.
sector : str
Search by sector to find stocks matching the criteria.
industry : str
Search by industry to find stocks matching the criteria.
period : str
Collect either annual, quarterly or trailing financial statements.
period_length : int
Determines how far you wish to look to the past (default is 12 quarters or years)
marketcap : str
Select stocks based on the market cap.
    exclude_exchanges: bool
        Whether to exclude companies listed on foreign exchanges (default True).
limit: int
Limit amount of companies displayed (default is 10)
export: str
Format to export data as
raw: bool
Output all raw data
already_loaded_stocks_data: Dict
Dictionary of filtered stocks data that has been loaded before
Returns
-------
dict
Dictionary of filtered stocks data
list
List of tickers filtered
"""
if already_loaded_stocks_data is None:
already_loaded_stocks_data = {}
used_statement = [
statement for statement in sa_dict if finance_key in sa_dict[statement]
][0]
if used_statement in already_loaded_stocks_data:
stocks_data = already_loaded_stocks_data
else:
company_tickers = filter_stocks(
country, sector, industry, marketcap, exclude_exchanges
)
if len(company_tickers) <= 1:
console.print("No information is available for the selected market cap. \n")
return dict(), list()
stocks_data = stockanalysis_model.get_stocks_data(
company_tickers, finance_key, sa_dict, already_loaded_stocks_data, period
)
stocks_data_statement = copy.deepcopy(stocks_data[used_statement])
company_tickers = list(stocks_data[used_statement].keys())
if len(stocks_data_statement[company_tickers[0]].columns) > period_length:
console.print(
f"Limiting the amount of periods to the last {period_length} periods."
)
for company in stocks_data_statement:
stocks_data_statement[company] = stocks_data_statement[company][
stocks_data_statement[company].columns[-period_length:]
]
item_name = sa_dict[used_statement][finance_key]
df = pd.DataFrame(
np.nan,
columns=stocks_data_statement.keys(),
index=stocks_data_statement[company_tickers[0]].columns,
)
df.index.name = "Date"
for company in stocks_data_statement:
df[company] = stocks_data_statement[company].loc[item_name]
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
item_name,
df,
)
if len(company_tickers) > limit:
console.print(f"Limiting the amount of companies displayed to {limit}.")
df = df[df.columns[:limit]]
maximum_value = df.max().max()
if maximum_value > 1_000_000_000:
df = df / 1_000_000_000
denomination = "[$ Billions]"
    elif maximum_value > 1_000_000:
df = df / 1_000_000
denomination = "[$ Millions]"
    elif maximum_value > 1_000:
df = df / 1_000
denomination = "[$ Thousands]"
else:
denomination = ""
if raw:
print_rich_table(
df.fillna("-"),
headers=list(df.columns),
show_index=True,
title=f"{item_name} {denomination}",
)
else:
plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
for company in df.columns:
plt.plot(df[company], ls="-", marker="o", label=company)
plt.title(f"{item_name} {denomination}")
plt.legend(loc="upper left")
if gtff.USE_ION:
plt.ion()
plt.tight_layout()
plt.show()
if not export:
console.print("")
return stocks_data, company_tickers
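# Illustrative call (argument values are assumptions, not taken from the library's docs):
#   stocks_data, tickers = display_plots_financials(
#       "re", sa_dict, country="United States", sector="Technology",
#       industry="Software", period="annual", period_length=12)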
|
"""
[ BI_error_corrector_2 ]
2019-07-28
Errors that can be filtered out systematically have been handled.
1. Write the pickle file that will be the input to the Chunk-based Dependency Corpus
::file before correction (input file)::
".\pickle\0617_02\ch_pred_sent.pickle"
::output file::
".\BI_correct\BI_correct.pickle"
2. After writing the file, revise the code so that manual error correction is convenient.
If the file produced in step 1 is fed back in as input without any code changes,
no items should be modified.
(Valid BI tags must not be "corrected" as well!)
(The number of items the system leaves untouched should be about 1240.)
::input file::
".\BI_correct\BI_correct.pickle"
::output file::
".\BI_correct\BI_correct_2.pickle"
--> Run toChunk-based_DepCorpus_ver3.0.0 with this file as input
--> Analyse the errors by type with BIErrorAnalysis
--> Add rules where appropriate; correct the rest manually
3. Manually correct the errors based on the output file (about 1240 items, ~6 hours expected)
Y. Namgoong
"""
from utils_saveNload import load_pickle_chr_pred, save_BI_correct, save_BI_correct_pickle
from utils_saveNload import load_BI_correct, save_BI_correct_2, save_BI_correct_2_pickle
from config import BI_PATH
import os, pickle
if not os.path.isdir(BI_PATH):
os.mkdir(BI_PATH)
def err1(chk_sent):
"""
    Handling err1 with a blanket rule caused over-correction and created many err6 errors,
    so this module was written to handle the SYX case separately.
    When several B-SYX tags follow a B-SYX, change them all to I-SYX.
    (Note) Two SYX chunks can legitimately occur in a row; that case should be excluded.
:param chk_sent: a sequence of chunk (a sentence)
:return: modified sequence of chunk (in terms of 'SYX')
"""
start = 0
for i, chk in enumerate(chk_sent):
if chk == 'B-SYX':
if start == 1:
chk_sent[i] = 'I-SYX'
elif start == 0:
start += 1
else:
start = 0
return chk_sent
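# Illustrative before/after for err1 with the code as written (added example):
#   before: ['B-SYX', 'B-SYX', 'B-SYX', 'B-PX']
#   after : ['B-SYX', 'I-SYX', 'I-SYX', 'B-PX']
# i.e. consecutive B-SYX tags after the first one are relabelled I-SYX.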
def err_corrector():
    # Input for step 1; running it through the ChunkTagger produced the output below
    # chk_pred = load_pickle_chr_pred() # chunk tagger predictions, i.e. the input to toChunk-based_DepCorpus
    # Output of step 1 and input of step 2
chk_pred = load_BI_correct()
chk_sent_list = []
for j, chk_sent in enumerate(chk_pred, 1):
for i, chk in enumerate(chk_sent):
# present morpheme info.
bi_tag, chk_tag = chk.split('-')
# next morpheme info.
try:
next_bi_tag, next_chk_tag = chk_sent[i+1].split('-')
if bi_tag == "B" and next_bi_tag == "B":
if len(chk_tag) == 2:
if next_chk_tag == chk_tag: # B-NX, B-NX
pass
elif len(next_chk_tag) == 2: # B-NX, B-PX
pass
elif len(next_chk_tag) == 3: # B-NX, B-JKX
pass
else: pass
elif len(chk_tag) == 3:
if next_chk_tag == chk_tag: # B-JKX, B-JKX # error1
                            if chk_tag == 'SYX':  # chk_tag carries no 'B-' prefix after the split above
                                chk_sent = err1(chk_sent) # managing SYX
                            elif chk_tag == 'JUX':
                                chk_sent[i+1] = 'I-JUX'
                            # elif chk_tag == 'JKX': # 20855:12 선 한 줄이, 23068:1 그 뿐이
                            #     chk_sent[i+1] = 'I-JKX'
                            elif chk_tag == 'EPX':
                                chk_sent[i+1] = 'I-EPX'
elif len(next_chk_tag) == 2: # B-JKX, B-NX
pass
elif len(next_chk_tag) == 3: # B-JKX, B-JUX
pass
else: pass
elif bi_tag == "B" and next_bi_tag == "I":
if len(chk_tag) == 2:
if next_chk_tag == chk_tag: # B-NX, I-NX
pass
elif len(next_chk_tag) == 2: # B-NX, I-PX # error2
# print("sent_id:", j)
# print(chk_sent)
# print(chk_sent[i+1], chk)
if chk_sent[i+1] == 'I-PX':
# print(chk_sent)
chk_sent[i] = 'B-PX'
                                # print(chk_sent) # need to save here
# input()
elif len(next_chk_tag) == 3: # B-NX, I-JKX # error3
                            pass # planned to be corrected manually
else: pass
elif len(chk_tag) == 3:
if next_chk_tag == chk_tag: # B-JKX, I-JKX
pass
elif len(next_chk_tag) == 2: # B-JKX, I-NX # error4
pass
elif len(next_chk_tag) == 3: # B-JKX, I-JUX # error5
if chk_sent[i] == 'B-SYX' and chk_sent[i+1] == 'I-ECX':
chk_sent[i] = 'I-ECX'
                                if chk_sent[i-1] == 'B-EFX': # check whether an index (range) error occurs here
chk_sent[i-1] = 'B-ECX'
else: pass
elif bi_tag == "I" and next_bi_tag == "B":
if len(chk_tag) == 2:
if next_chk_tag == chk_tag: # I-NX, B-NX
pass
elif len(next_chk_tag) == 2: # I-NX, B-PX
pass
elif len(next_chk_tag) == 3: # I-NX, B-JKX
pass
else: pass
elif len(chk_tag) == 3:
if next_chk_tag == chk_tag: # I-JKX, B-JKX # error6
                            chk_sent[i+1] = 'I-' + next_chk_tag  # strings are immutable, so rebuild the whole tag
elif len(next_chk_tag) == 2: # I-JKX, B-NX
pass
elif len(next_chk_tag) == 3: # I-JKX, B-JUX
pass
else: pass
elif bi_tag == "I" and next_bi_tag == "I":
if len(chk_tag) == 2:
if next_chk_tag == chk_tag: # I-NX, I-NX
pass
elif len(next_chk_tag) == 2: # I-NX, I-PX # error7
pass
elif len(next_chk_tag) == 3: # I-NX, I-JKX # error8
if chk == 'I-AX' and chk_sent[i+1] == 'I-JKX':
chk_sent[i+1] = 'I-AX'
else: pass
elif len(chk_tag) == 3:
if next_chk_tag == chk_tag: # I-JKX, I-JKX
pass
elif len(next_chk_tag) == 2: # I-JKX, I-NX # error9
pass
elif len(next_chk_tag) == 3: # I-JKX, I-JUX # error10
chk_sent[i+1] = 'I-' + chk_tag
else: pass
else: pass
except IndexError: # be raised error when the final morpheme comes.
# processing for the last morpheme
if len(chk_tag) == 2: # content words
pass
elif len(chk_tag) == 3: # functional words
pass
else: # for handling exceptions
pass
# print("result: ", j, chk_sent)
# with open(os.path.join(OUT_PATH, 'BI_correct.sent'), 'a', encoding='utf-8') as f:
# f.write(str(chk_sent)+'\n')
# with open(os.path.join(OUT_PATH, 'BI_correct.pickle'), 'ab') as f:
# pickle.dump(chk_sent, f)
chk_sent_list.append(chk_sent)
# save_BI_correct(chk_sent)
save_BI_correct_2(chk_sent)
return chk_sent_list
if __name__ == "__main__":
# save_BI_correct_pickle(err_corrector())
save_BI_correct_2_pickle(err_corrector())
    # test script used while writing the err1() module
# chk_sent = ['B-PX', 'B-EFX', 'B-SYX', 'B-SYX', 'B-SYX', 'B-PX', 'B-EFX', 'B-SYX', 'B-SYX', 'B-SYX', 'B-PX', 'B-EFX']
# print("before:", chk_sent)
# print("after:", err1(chk_sent))
|
from logging import getLogger
from django.core.management.base import BaseCommand, CommandError
from worker.utils import listen, create_lircrc_tempfile
LOGGER = getLogger(__name__)
class Command(BaseCommand):
    def handle(self, *args, **options):
name = 'riker'
fname = create_lircrc_tempfile(name)
LOGGER.warning(
'Created lircrc file at {}; starting to listen.'.format(
fname
)
)
listen(name, fname)
|
import numpy as np
import matplotlib.pyplot as plt
import bead_util as bu
import scipy.signal as ss
path = "/data/20180927/bead1/spinning/0_6mbar"
files = bu.find_all_fnames(path)
index = 0
fdrive = 1210.7
bw = 0.1
bwp = 5.
Ns = 250000
Fs = 5000.
k = 1e-13*(2.*np.pi*370.)**2
df = bu.DataFile()
def proc_f(f):
df.load(f)
df.load_other_data()
df.diagonalize()
drive = df.other_data[2]
resp = ss.detrend(df.pos_data[index])*df.conv_facs[0]/k
drive = ss.detrend(df.other_data[2])*df.conv_facs[0]/k
respft = np.fft.rfft(resp)
driveft = np.fft.rfft(drive)
freqs = np.fft.rfftfreq(Ns, d = 1./Fs)
tarr = np.linspace(0., 50., 250000)
respft_line = respft
driveft_line = driveft
respft_line[np.abs(freqs - fdrive)>bw] = 0.
driveft_line[np.abs(freqs - fdrive)>bw] = 0.
anal_signal_resp = ss.hilbert(np.fft.irfft(respft_line))
anal_signal_drive = ss.hilbert(np.fft.irfft(driveft_line))
phirfft = np.angle(np.sum(respft_line)/np.sum(driveft_line))
phirh = np.unwrap(np.angle(anal_signal_resp)) - np.unwrap(np.angle(anal_signal_drive))
return freqs, respft, driveft, tarr, anal_signal_resp, phirh, phirfft
freqs0, respft0, driveft0, tarr0, anal_signal_resp0, phir0h, phi0fft = proc_f(files[-1])
freqs, respft1, driveft1, tarr, anal_signal_resp1, phir1h, phi1fft = proc_f(files[-2])
freqs, respft2, driveft2, tarr, anal_signal_resp2, phir2h, phi2fft = proc_f(files[-3])
#plot the data
plt.plot(tarr, np.abs(anal_signal_resp2), label = "0-50s")
plt.plot(tarr, np.abs(anal_signal_resp1), label = "350-400s")
plt.plot(tarr, np.abs(anal_signal_resp0), label = "700-750s")
plt.xlabel("Time [s]")
plt.legend()
plt.ylabel("Instantaneous Amplitude [m]")
plt.ylim([0, 4e-10])
plt.xlim([0, 50])
plt.show()
plt.plot(tarr, phir0h, label = "0-50s")
plt.plot(tarr, phir1h-2.*np.pi, label = "350-400s")
plt.plot(tarr, phir2h, label = "700-750s")
plt.xlabel("Time [s]")
plt.ylabel("Drive Response Phase Difference [rad]")
plt.xlim([0, 50])
#plt.ylim([0, 3])
plt.show()
|
#!/usr/bin/env python
"""
This script is used to map fastq files to the genome. The input is a comma
separated list of fastq[.gz] files (or two lists if the input is paired-end).
The output are bam files with the mapped reads, a table containing the number
of reads mapped to each gene and a wiggle file with the coverage of the
reads.
"""
import sys
import argparse
import pysam
import csv
import os
from Bio import SeqIO
import RILseq
def process_command_line(argv):
"""
Return a 2-tuple: (settings object, args list).
`argv` is a list of arguments, or `None` for ``sys.argv[1:]``.
"""
if argv is None:
argv = sys.argv[1:]
# initialize the parser object, replace the description
parser = argparse.ArgumentParser(
description='Map fastq files to the genome using bwa.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'genome_fasta',
        help='Name of genome fasta file. The file must be indexed using the '
        'bwa index command prior to this run.')
parser.add_argument(
'-1', '--fastq_1', action='append', nargs='+',
help='A list of the first read of the sequencing.')
parser.add_argument(
'-2', '--fastq_2', action='append', nargs='*',
        help='A list of the second read of the sequencing.'
        ' The order of these files should be the same as -1. Optional.')
parser.add_argument(
'-g', '--genes_gff',
help='Name of gff file to count the reads per gene. If not given '
' or not readable, skip this stage.')
parser.add_argument(
'-r', '--reverse_complement', default=False,
action='store_true',
help='Treat the reads as reverse complement only when counting'
' number of reads per gene and generating wig file. The resulting BAM'
' files will be the original ones. Use this when treating libraries'
" built using Livny's protocol.")
parser.add_argument(
'-f', '--feature', default='exon',
help='Name of features to count on the GTF file (column 2).')
parser.add_argument(
'-i', '--identifier', default='gene_id',
help='Name of identifier to print (in column 8 of the GTF file).')
parser.add_argument(
'-v', '--overlap', type=int, default=5,
help='Minimal required overlap between the fragment and the feature.')
parser.add_argument(
'-m', '--allowed_mismatches', type=float, default=2,
help="Allowed mismatches for BWA mapping.")
parser.add_argument(
'-o', '--outhead', default='bwa_mapped_single_reads',
help='Output file names of counts table (suffixed _counts.txt) and'
' wiggle file (suffixed _coverage.wig)')
parser.add_argument(
'-d', '--dirout', default='.',
help='Output directory, default is this directory.')
parser.add_argument(
'--bwa_exec', default='bwa',
help='bwa command')
parser.add_argument(
'-S', '--samtools_cmd', default='samtools',
help='Samtools executable.')
parser.add_argument(
'-a', '--params_aln', default='-t 8 -R 200',
help='Additional parameters for aln function of bwa.')
parser.add_argument(
'-s', '--sampe_params', default='-a 1500 -P',
help='Additional parameters for sampe function of bwa.')
parser.add_argument(
'--samse_params', default=' ',
help='Additional parameters for samse function of bwa.')
parser.add_argument(
'-w', '--create_wig', default=False, action='store_true',
help='Create a coverage wiggle file.')
settings = parser.parse_args(argv)
return settings
def main(argv=None):
settings = process_command_line(argv)
if not os.path.exists(settings.dirout):
os.makedirs(settings.dirout)
genome_len = {}
if settings.genome_fasta:
for chrf in SeqIO.parse(settings.genome_fasta, 'fasta'):
genome_len[chrf.id] = len(chrf.seq)
if settings.genes_gff:
try:
pos_feat_list, all_features = RILseq.read_gtf(
open(settings.genes_gff), settings.feature, settings.identifier)
except IOError:
settings.genes_gff = None
gcounts = {}
lib_order = []
fastq_1_list = list(RILseq.flat_list(settings.fastq_1))
fastq_2_list = list(RILseq.flat_list(settings.fastq_2))
for i, r1_name in enumerate(RILseq.flat_list(settings.fastq_1)):
try:
r2_name = fastq_2_list[i]
except IndexError:
r2_name = None
outhead = r1_name.rsplit('.', 1)[0]
libname = outhead.rsplit('/',1)[-1]
outhead = '%s_bwa'%libname
bamname = RILseq.run_bwa(
settings.bwa_exec, r1_name, r2_name,
os.path.abspath(settings.dirout), outhead, settings.allowed_mismatches,
os.path.abspath(settings.genome_fasta), settings.params_aln, settings.sampe_params,
settings.samse_params, settings.samtools_cmd)
samfile = pysam.AlignmentFile(bamname,'rb')
if settings.genes_gff:
lib_order.append(libname)
gcounts[libname] = RILseq.count_features(
pos_feat_list, samfile, settings.overlap,
rev=settings.reverse_complement)
if settings.create_wig:
outwigs = [open("%s/%s_coverage.wig"%(settings.dirout, fastq.split("_cutadapt")[0].split('/')[-1]), 'w')
for fastq in fastq_1_list]
coverage = RILseq.generate_wig(
samfile, rev=settings.reverse_complement, genome_lengths=genome_len)
RILseq.print_wiggle(
coverage, "%s_single_fragments_coverage"%libname,
"%s single fragments coverage"%libname, outwigs[i])
# Print the table of counts
if settings.genes_gff:
outtables = [open("%s/%s_counts.txt"%(settings.dirout, fastq.split("_cutadapt")[0].split('/')[-1]), 'w')
for fastq in fastq_1_list]
for i, r1_name in enumerate(fastq_1_list):
outt = csv.writer(outtables[i], delimiter='\t')
outt.writerow(['Gene name'] + lib_order)
for g in sorted(list(all_features)):
row_out = [g]
for libn in lib_order:
row_out.append(gcounts[libn][g])
outt.writerow(row_out)
return 0 # success
if __name__ == '__main__':
status = main()
sys.exit(status)
|
from lxml import etree
from modules.tier_1_crawler import Tier_1_Crawler
class Uniqlo_Crawler(Tier_1_Crawler):
''' This func is used by func: extract_data '''
def __get_genre_list(self):
return ["women","men","kids","baby"]
''' This func is used by func: extract_data '''
def __get_forbidden_big_cat(self):
return ["每週新品","熱門主題","特輯推薦","相關推薦"]
''' This func is used by func: generate_tier_1_info '''
def extract_data(self, supplier_source_path):
tier_1_info = dict()
texts = self.load_texts(supplier_source_path)
if texts:
print("正在清洗資料")
genres = self.__get_genre_list()
html = etree.HTML(texts)
#print(html) # <Element html at 0x24fe65a24c8>
#print(etree.tostring(html).decode())
for i, genre in enumerate(genres):
tier_1_info.setdefault(genre, dict())
common_genre_xpath = f"//a[@id='header_{genre}']/following-sibling::div"
big_cat_xpath = common_genre_xpath + "//span[@class='title']"
tags = html.xpath(big_cat_xpath)
big_cat_texts = [str(tag.text.replace("\n", "").strip()) for tag in tags]
forbidden_items = self.__get_forbidden_big_cat()
new_list = list()
for big_cat_text in big_cat_texts:
if any([forbidden_item in big_cat_text for forbidden_item in forbidden_items]):
continue
new_list.append(big_cat_text)
big_cat_texts = new_list
#print("big_categories:\n", big_cat_texts)
for big_cat_text in big_cat_texts:
if big_cat_text.count(" ") > 1:
big_cat_text = big_cat_text.replace(" ", "")
tier_1_info[genre].setdefault(big_cat_text,
dict())
if len(big_cat_texts) == 0:
print("({i+1}) {genre} cannot get big_categories.")
else:
for big_catID, big_cat_text in enumerate(big_cat_texts):
# mapping: big_category => sales_categories
sales_cat_xpath = common_genre_xpath + f"//span[contains(text(),'{big_cat_text}')]"
sales_cat_xpath += "/../../li/a[@class='']"
sales_cat_tags = html.xpath(sales_cat_xpath)
# (1) text of sales_cat
sales_cat_texts = list()
for tag in sales_cat_tags:
tmp = tag.text
if "/" in tmp:
tmp = ''.join(tmp.split())
else:
tmp = tmp.replace("\n", "").replace(" ", "").strip()
sales_cat_texts.append(tmp)
# (2) link of sales_cat
sales_cat_links = [tag.get("href").strip() for tag in sales_cat_tags]
if (any(sales_cat_texts) or any(sales_cat_links)) and (len(sales_cat_texts) == len(sales_cat_links)):
#print("texts:\n", sales_cat_texts)
#print("links:\n", sales_cat_links)
for i in range(max(len(sales_cat_texts), len(sales_cat_links))):
tier_1_info[genre][big_cat_text].setdefault(sales_cat_texts[i], sales_cat_links[i])
elif len(sales_cat_texts) != len(sales_cat_links):
print("not match!")
print("tier_1_info:")
print(tier_1_info)
print("成功產生結果: tier_1_info\n")
return tier_1_info
else:
print("[002] Because func: load_texts has error, cannot execute func: extract_data.")
return None
|
import os
import sys
class local():
def __init__(self,local):
self.local=local
    def local_do(self):
        try:
            os.mkdir(self.local)
        except OSError:
            pass
        try:
            # create or reset the configuration files used by the application
            open(self.local + ".set_color.txt", "w+").close()
            with open(self.local + ".last_theme.txt", "w+") as f:
                f.write("LIGHT")
            with open(self.local + ".local.txt", "w+") as f:
                f.write(self.local)
            open(self.local + ".lyrics.txt", "a+").close()
        except OSError:
            pass
|
import argparse
import csv
def main():
parser = argparse.ArgumentParser(description="Determine FASTQ file encoding")
# parser.add_argument("OUTFILE", help="Name to save to. If ends in '.gz', output will be gzipped")
parser.add_argument("FILE1", type=file,
help="?????")
parser.add_argument("FILE2", type=file,
help="?????")
args = parser.parse_args()
file1_reader = csv.reader(args.FILE1)
file1_genes = [row[0] for row in file1_reader if row[0]]
file2_reader = csv.reader(args.FILE2)
file2_genes = [row[0] for row in file2_reader if row[0]]
    print(file1_genes)
    print(file2_genes)
if __name__ == "__main__":
main()
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from .obscore import *
__all__ = ["ObsCoreMetadata"]
|
# MIT License
#
# Copyright (c) 2019
# Miguel Perales - miguelperalesbermejo@gmail.com
# Jose Manuel Caballero - jcaballeromunoz4@gmail.com
# Jose Antonio Martin - ja.martin.esteban@gmail.com
# Miguel Diaz - migueldiazgil92@gmail.com
# Jesus Blanco Garrido - jesusblancogarrido@gmail.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import subprocess
import threading
import dtkglobal
import wx
import json
class ManageMetadataTemplate(wx.Panel):
def __init__(self, parent):
super(ManageMetadataTemplate, self).__init__(parent)
self.InitUI()
def InitUI(self):
self.mainSizer = wx.GridBagSizer(4, 4)
self.metadataTemplatesLbl = wx.StaticText(self, label="Metadata Templates")
self.metadataTemplatesLbl.ToolTip = "List of Metadata Templates available."
self.metadataTemplatesComboBox = wx.ComboBox(self, style=wx.CB_READONLY)
self.metadataTemplatesComboBox.ToolTip = "List of Metadata Templates available."
self.metadataTemplatesComboBox.Items = dtkglobal.metadataTemplates
self.metadataTemplatesComboBox.Bind(wx.EVT_COMBOBOX, self.ChangeMetadataTemplate)
self.metadataTypesLbl = wx.StaticText(self, label="Metadata Types")
self.metadataTypesLbl.ToolTip = "List of Metadata Types."
self.metadataTypesListBox = wx.ListBox(self, style=wx.LB_MULTIPLE)
self.metadataTypesListBox.ToolTip = "List of Metadata Types."
self.metadataTypesListBox.Items = dtkglobal.metadataTypes
self.btnSaveMetadataTemplate = wx.Button(self, label="Save")
self.btnSaveMetadataTemplate.Bind(wx.EVT_BUTTON, self.SaveMetadataTemplate)
self.btnDeleteMetadataTemplate = wx.Button(self, label="Delete")
self.btnDeleteMetadataTemplate.Bind(wx.EVT_BUTTON, self.DeleteMetadataTemplate)
row = 0
col = 0
self.mainSizer.Add(self.metadataTemplatesLbl, pos=(row, col), flag=wx.EXPAND | wx.ALL | wx.ALIGN_LEFT, border=5)
row = 0
col = 2
self.mainSizer.Add(
self.metadataTemplatesComboBox,
pos=(row, col),
span=(0, 10),
flag=wx.EXPAND | wx.ALL | wx.ALIGN_RIGHT,
border=5,
)
row = 1
col = 0
self.mainSizer.Add(self.metadataTypesLbl, pos=(row, col), flag=wx.EXPAND | wx.ALL | wx.ALIGN_LEFT, border=5)
row = 1
col = 2
self.mainSizer.AddGrowableRow(row)
self.mainSizer.Add(
self.metadataTypesListBox, pos=(row, col), span=(0, 10), flag=wx.EXPAND | wx.ALL | wx.ALIGN_RIGHT, border=5
)
row = 2
col = 0
self.mainSizer.Add(
self.btnSaveMetadataTemplate, pos=(row, col), flag=wx.TOP | wx.LEFT | wx.RIGHT | wx.ALIGN_LEFT, border=5
)
row = 2
col = 10
self.mainSizer.Add(
self.btnDeleteMetadataTemplate, pos=(row, col), flag=wx.TOP | wx.LEFT | wx.RIGHT | wx.ALIGN_RIGHT, border=5
)
self.mainSizer.AddGrowableCol(col)
self.mainSizer.SetEmptyCellSize((0, 0))
self.Layout()
self.Fit()
self.SetSizer(self.mainSizer)
def ChangeMetadataTemplate(self, event):
self.metadataTypesListBox.SetSelection(-1)
if self.metadataTemplatesComboBox.GetValue() == "All":
for i in range(self.metadataTypesListBox.GetCount()):
self.metadataTypesListBox.SetSelection(i)
if self.metadataTemplatesComboBox.GetValue() in dtkglobal.metadataTemplatesSelection:
for line in dtkglobal.metadataTemplatesSelection[self.metadataTemplatesComboBox.GetValue()]:
self.metadataTypesListBox.SetStringSelection(line)
def DeleteMetadataTemplate(self, event):
metadataTemplateStr = self.metadataTemplatesComboBox.GetValue()
if metadataTemplateStr != "All" and metadataTemplateStr != "None":
if metadataTemplateStr in dtkglobal.metadataTemplates:
dlg = wx.MessageDialog(
self,
"Metadata template '" + metadataTemplateStr + "' will be removed from DTK. Please confirm.",
"DTK - Delete Metadata Template",
wx.YES_NO | wx.ICON_WARNING,
)
result = dlg.ShowModal()
if result == wx.ID_NO:
dlg.Destroy()
return
dtkglobal.metadataTemplatesSelection.pop(self.metadataTemplatesComboBox.GetValue())
dtkglobal.StoreMetadataTemplates()
dtkglobal.ReadMetadataTemplates()
self.Parent.GetPage(0).metadataTemplatesComboBox.Items = dtkglobal.metadataTemplates
self.Layout()
self.metadataTemplatesComboBox.SetSelection(0)
self.ChangeMetadataTemplate(event)
else:
dlg = wx.MessageDialog(
self,
"Metadata template '" + metadataTemplateStr + "' cannot be removed from DTK.",
"DTK - Delete Metadata Template",
wx.OK | wx.ICON_ERROR,
)
dlg.ShowModal()
dlg.Destroy()
return
def SaveMetadataTemplate(self, event):
metadataTemplateStr = self.metadataTemplatesComboBox.GetValue()
if metadataTemplateStr != "All" and metadataTemplateStr != "None":
if metadataTemplateStr in dtkglobal.metadataTemplates:
dlg = wx.MessageDialog(
self,
"Metadata template '" + metadataTemplateStr + "' will be updated. Please confirm.",
"DTK - Update Metadata Template",
wx.YES_NO | wx.ICON_WARNING,
)
result = dlg.ShowModal()
if result == wx.ID_NO:
dlg.Destroy()
return
metadataTypes = self.metadataTypesListBox.GetSelections()
strMetadataTypesList = []
for numberMType in metadataTypes:
strMType = self.metadataTypesListBox.GetString(numberMType)
strMetadataTypesList.append(strMType)
dtkglobal.metadataTemplatesSelection[metadataTemplateStr] = strMetadataTypesList
dtkglobal.StoreMetadataTemplates()
dtkglobal.ReadMetadataTemplates()
self.Parent.GetPage(0).metadataTemplatesComboBox.Items = dtkglobal.metadataTemplates
self.Layout()
else:
dlg = wx.MessageDialog(
self,
"Metadata template '" + metadataTemplateStr + "' cannot be modified.",
"DTK - Update Metadata Template",
wx.OK | wx.ICON_ERROR,
)
dlg.ShowModal()
dlg.Destroy()
return
class AddMetadataTemplate(wx.Panel):
def __init__(self, parent):
super(AddMetadataTemplate, self).__init__(parent)
self.InitUI()
def InitUI(self):
self.mainSizer = wx.GridBagSizer(5, 5)
self.metadataTemplatesLbl = wx.StaticText(self, label="Metadata Template name")
self.metadataTemplatesLbl.ToolTip = "List of Metadata Templates available."
self.metadataTemplateName = wx.TextCtrl(self)
self.metadataTemplateName.ToolTip = "Name of new Metadata Template."
self.metadataTypesLbl = wx.StaticText(self, label="Metadata Types")
self.metadataTypesLbl.ToolTip = "List of Metadata Types."
self.metadataTypesListBox = wx.ListBox(self, style=wx.LB_MULTIPLE)
self.metadataTypesListBox.ToolTip = "List of Metadata Types."
self.metadataTypesListBox.Items = dtkglobal.metadataTypes
self.btnAddMetadataTemplate = wx.Button(self, label="Add Metadata Template")
self.btnAddMetadataTemplate.Bind(wx.EVT_BUTTON, self.AddMetadataTemplate)
row = 0
col = 0
self.mainSizer.Add(
self.metadataTemplatesLbl, pos=(row, col), flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border=5
)
row = 0
col = 1
self.mainSizer.Add(
self.metadataTemplateName,
pos=(row, col),
span=(0, 10),
flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT | wx.ALIGN_RIGHT,
border=5,
)
row = 1
col = 0
self.mainSizer.Add(
self.metadataTypesLbl, pos=(row, col), flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border=5
)
row = 1
col = 1
self.mainSizer.AddGrowableRow(row)
self.mainSizer.Add(
self.metadataTypesListBox,
pos=(row, col),
span=(0, 10),
flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT,
border=5,
)
row = 2
col = 1
self.mainSizer.Add(
self.btnAddMetadataTemplate,
pos=(row, col),
span=(0, 10),
flag=wx.TOP | wx.LEFT | wx.RIGHT | wx.ALIGN_RIGHT,
border=5,
)
self.mainSizer.AddGrowableCol(col)
self.mainSizer.SetEmptyCellSize((0, 0))
self.Layout()
self.Fit()
self.SetSizer(self.mainSizer)
def ChangeMetadataTemplate(self, event):
self.metadataTypesListBox.SetSelection(-1)
if self.metadataTemplatesComboBox.GetValue() == "All":
for i in range(self.metadataTypesListBox.GetCount()):
self.metadataTypesListBox.SetSelection(i)
if self.metadataTemplatesComboBox.GetValue() in dtkglobal.metadataTemplatesSelection:
for line in dtkglobal.metadataTemplatesSelection[self.metadataTemplatesComboBox.GetValue()]:
self.metadataTypesListBox.SetStringSelection(line)
def AddMetadataTemplate(self, event):
metadataTemplateStr = self.metadataTemplateName.GetValue()
if len(metadataTemplateStr) == 0:
dlg = wx.MessageDialog(
self, "Please introduce a metadata template name.", "DTK - Add Metadata Template", wx.OK | wx.ICON_ERROR
)
dlg.ShowModal()
dlg.Destroy()
return
if (metadataTemplateStr == "All") or (metadataTemplateStr == "None"):
dlg = wx.MessageDialog(
self,
"Metadata template '"
+ metadataTemplateStr
+ "' already exist and cannot be changed, please introduce other metadata template name",
"DTK - Update Metadata Template",
wx.OK | wx.ICON_ERROR,
)
dlg.ShowModal()
dlg.Destroy()
return
if metadataTemplateStr in dtkglobal.metadataTemplates:
dlg = wx.MessageDialog(
self,
"Metadata template '"
+ metadataTemplateStr
+ "' already exist, please confirm if you want to update it",
"DTK - Update Metadata Template",
wx.YES_NO | wx.ICON_WARNING,
)
result = dlg.ShowModal()
if result == wx.ID_NO:
dlg.Destroy()
return
metadataTypes = self.metadataTypesListBox.GetSelections()
strMetadataTypesList = []
for numberMType in metadataTypes:
strMType = self.metadataTypesListBox.GetString(numberMType)
strMetadataTypesList.append(strMType)
dlg = wx.MessageDialog(
self,
"Metadata template '" + metadataTemplateStr + "' created.",
"DTK - Add Metadata Template",
wx.OK | wx.ICON_INFORMATION,
)
dlg.ShowModal()
dlg.Destroy()
dtkglobal.metadataTemplatesSelection[self.metadataTemplateName.GetValue()] = strMetadataTypesList
dtkglobal.StoreMetadataTemplates()
dtkglobal.ReadMetadataTemplates()
self.Parent.GetPage(0).metadataTemplatesComboBox.Items = dtkglobal.metadataTemplates
self.Layout()
class MetadataTemplatesFrame(wx.Frame):
def __init__(self, parent=None):
super(MetadataTemplatesFrame, self).__init__(parent, title="Metadata Templates")
myStream = dtkglobal.getImageStream()
myImage = wx.Image(myStream)
myBitmap = wx.Bitmap(myImage)
icon = wx.Icon()
icon.CopyFromBitmap(myBitmap)
self.SetIcon(icon)
# dtkglobal.MakeModal(self, True)
self.InitUI()
def InitUI(self):
self.panel = wx.Panel(self)
self.nb = wx.Notebook(self.panel)
self.nb.AddPage(ManageMetadataTemplate(self.nb), "Manage Metadata Templates")
self.nb.AddPage(AddMetadataTemplate(self.nb), "Add Metadata Template")
self.mainSizer = wx.GridBagSizer(5, 5)
row = 0
col = 0
self.mainSizer.Add(self.nb, pos=(row, col), flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border=5)
self.mainSizer.AddGrowableCol(0)
self.mainSizer.AddGrowableRow(0)
self.panel.SetSizerAndFit(self.mainSizer)
self.mainSizer.Fit(self)
self.Layout()
self.Centre()
self.MinSize = self.Size
self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
self.Show()
def OnCloseWindow(self, event):
self.Destroy()
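# Hedged usage sketch, not part of the original DTK sources: launching this frame
# stand-alone assumes dtkglobal has already been initialised (metadataTemplates,
# metadataTypes and the embedded icon stream), which the main DTK application
# normally does before opening this window.
if __name__ == "__main__":
    app = wx.App(False)
    MetadataTemplatesFrame()
    app.MainLoop()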
|
print("enter your config for the sudoku grid, 0 equals empty")
A1 = int(input("A1"))
A2 = int(input("A2"))
A3 = int(input("A3"))
A4 = int(input("A4"))
A5 = int(input("A5"))
A6 = int(input("A6"))
A7 = int(input("A7"))
A8 = int(input("A8"))
A9 = int(input("A9"))
B1 = int(input("B1"))
B2 = int(input("B2"))
B3 = int(input("B3"))
B4 = int(input("B4"))
B5 = int(input("B5"))
B6 = int(input("B6"))
B7 = int(input("B7"))
B8 = int(input("B8"))
B9 = int(input("B9"))
C1 = int(input("C1"))
C2 = int(input("C2"))
C3 = int(input("C3"))
C4 = int(input("C4"))
C5 = int(input("C5"))
C6 = int(input("C6"))
C7 = int(input("C7"))
C8 = int(input("C8"))
C9 = int(input("C9"))
D1 = int(input("D1"))
D2 = int(input("D2"))
D3 = int(input("D3"))
D4 = int(input("D4"))
D5 = int(input("D5"))
D6 = int(input("D6"))
D7 = int(input("D7"))
D8 = int(input("D8"))
D9 = int(input("D9"))
E1 = int(input("E1"))
E2 = int(input("E2"))
E3 = int(input("E3"))
E4 = int(input("E4"))
E5 = int(input("E5"))
E6 = int(input("E6"))
E7 = int(input("E7"))
E8 = int(input("E8"))
E9 = int(input("E9"))
F1 = int(input("F1"))
F2 = int(input("F2"))
F3 = int(input("F3"))
F4 = int(input("F4"))
F5 = int(input("F5"))
F6 = int(input("F6"))
F7 = int(input("F7"))
F8 = int(input("F8"))
F9 = int(input("F9"))
G1 = int(input("G1"))
G2 = int(input("G2"))
G3 = int(input("G3"))
G4 = int(input("G4"))
G5 = int(input("G5"))
G6 = int(input("G6"))
G7 = int(input("G7"))
G8 = int(input("68"))
G9 = int(input("G9"))
H1 = int(input("H1"))
H2 = int(input("H2"))
H3 = int(input("H3"))
H4 = int(input("H4"))
H5 = int(input("H5"))
H6 = int(input("H6"))
H7 = int(input("H7"))
H8 = int(input("H8"))
H9 = int(input("H9"))
I1 = int(input("I1"))
I2 = int(input("I2"))
I3 = int(input("I3"))
I4 = int(input("I4"))
I5 = int(input("I5"))
I6 = int(input("I6"))
I7 = int(input("I7"))
I8 = int(input("I8"))
I9 = int(input("I9"))
M = 9
def puzzle(a):
for i in range(M):
for j in range(M):
print(a[i][j],end = " ")
print()
def solve(grid, row, col, num):
for x in range(9):
if grid[row][x] == num:
return False
for x in range(9):
if grid[x][col] == num:
return False
startRow = row - row % 3
startCol = col - col % 3
for i in range(3):
for j in range(3):
if grid[i + startRow][j + startCol] == num:
return False
return True
def Suduko(grid, row, col):
if (row == M - 1 and col == M):
return True
if col == M:
row += 1
col = 0
if grid[row][col] > 0:
return Suduko(grid, row, col + 1)
for num in range(1, M + 1, 1):
if solve(grid, row, col, num):
grid[row][col] = num
if Suduko(grid, row, col + 1):
return True
grid[row][col] = 0
return False
'''0 means the cells where no value is assigned'''
grid = [[A1, A2, A3, A4, A5, A6, A7, A8, A9],
[B1, B2, B3, B4, B5, B6, B7, B8, B9],
[C1, C2, C3, C4, C5, C6, C7, C8, C9],
[D1, D2, D3, D4, D5, D6, D7, D8, D9],
[E1, E2, E3, E4, E5, E6, E7, E8, E9],
[F1, F2, F3, F4, F5, F6, F7, F8, F9],
[G1, G2, G3, G4, G5, G6, G7, G8, G9],
[H1, H2, H3, H4, H5, H6, H7, H8, H9],
[I1, I2, I3, I4, I5, I6, I7, I8, I9]]
if (Suduko(grid, 0, 0)):
puzzle(grid)
else:
print("Solution does not exist:(")
|
# This Python file takes evenly spaced samples from a video
# and estimates how much information each frame contains by summing
# all the pixel intensities of its grayscale version.
import cv2
import numpy as np
import os
import time
import math
# Global variables
number_samples = 100
length_of_file = 60
# Take a sample every (length_of_file / number_samples) seconds
def extract_image_one_fps(video_source_path):
# number_samples = 100
length_of_file = 60
vidcap = cv2.VideoCapture(video_source_path)
count = 0
success = True
while success:
vidcap.set(cv2.CAP_PROP_POS_MSEC,((length_of_file/number_samples)*count*1000))
success,image = vidcap.read()
## Stop when last frame is identified
# image_last = cv2.imread("./photos/frame{}.png".format(count-1))
# if np.array_equal(image,image_last):
# break
cv2.imwrite("./photos/%d.jpg" % count, image) # save frame as PNG file
print ('{}reading a new frame: {} '.format(count,success))
count += 1
def take_l2_norm():
threshold = 10
for i in range(number_samples):
norm = 0
# Convert to grayscale
image = cv2.imread('./photos/{}.jpg'.format(i), 0)
rows, cols = image.shape
        for row in range(rows):
            for col in range(cols):
                norm += image[row][col]
norm = pow(norm, 1/6)
print(norm)
if norm >= threshold:
print("This frame ./photos/{}.jpg contains noticable features".format(i))
else:
            print()
            print("This frame ./photos/{}.jpg does not contain noticeable features".format(i))
if __name__ == "__main__":
try:
os.mkdir('photos')
except OSError:
pass
# extract_image_one_fps('anne-marie-live.mp4')
take_l2_norm()
|
import logging
from .base import * # noqa
logging.disable(logging.CRITICAL)
# Should only include explicit testing settings
SECRET_KEY = 'NOT A SECRET'
PROJECTS_ENABLED = True
PROJECTS_AUTO_CREATE = True
TRANSITION_AFTER_REVIEWS = 2
TRANSITION_AFTER_ASSIGNED = True
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.MD5PasswordHasher',
]
|
lista=[]
while True:
lista.append(int(input('Digite um valor : ')))
res = str(input('Deseja continuar ? [S/N] : '))
if res in 'Nn':
break
print('+='*30)
print(f'Voce digitou {len(lista)} numeros')
lista.sort(reverse=True)  # sorts in descending order
print(f'Voce digitou em ordem decrescente valores {lista}')
if 5 in lista:
print('O valor 5 esta na lista')
else:
print('O valor 5 nao esta na lista')
|
import commands
import sys,os
input_file=sys.argv[1]
current_path=commands.getoutput('pwd')
print 'Read input_file',input_file
#input file is final_combination3.csv
if os.path.isfile(input_file) == True:
    #use this Knowledge Flow to convert the file to nominal attributes and save it as arff
file_kf=open('./convert2arff.kf','r')
file_kf_read=file_kf.read()
file_kf_read=file_kf_read.replace("csv_inputfile",current_path+'/'+input_file)
file_kf_read=file_kf_read.replace("outputpath",current_path)
file_kf_read=file_kf_read.replace("outputfilename",'converted_input.arff')
file_kf.close()
file_kf_new=open('convert2arffRUN.kf','w')
file_kf_new.write(file_kf_read)
file_kf_new.close()
num_c=1
while True: #to make sure that converted_input.arff is created
print 'Check converted_input.arff , round ',num_c
print commands.getoutput('java -cp ./weka-3-9-1/weka.jar -Xmx24000m -Xss5000m weka.knowledgeflow.FlowRunner convert2arffRUN.kf')
num_c+=1
if int(os.path.getsize("converted_input.arff")) > 0 :
print 'Pass!! : converted_input.arff is created'
break
commands.getoutput("sed \"s/@attribute \' label\' {\' photo\'}/@attribute \' label\' {\' photo\',\' non_photo\'}/g\" converted_input.arff > "+sys.argv[2]+"_converted_input.arff")
else:
print 'No input file => ',input_file
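# Hedged porting note (not in the original script): the Python 2-only "commands"
# module maps onto subprocess in Python 3 if this flow is ever ported, e.g.:
# import subprocess
# current_path = subprocess.getoutput('pwd')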
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2019) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: oneview_server_profile_template_facts
short_description: Retrieve facts about the Server Profile Templates from OneView.
description:
- Retrieve facts about the Server Profile Templates from OneView.
version_added: "2.3"
requirements:
- "python >= 2.7.9"
- "hpOneView >= 5.0.0"
author: "Bruno Souza (@bsouza)"
options:
name:
description:
- Server Profile Template name.
uri:
description:
- Server Profile Template uri.
options:
description:
- "List with options to gather additional facts about Server Profile Template resources.
Options allowed: C(new_profile), C(transformation) and C(available_networks)."
notes:
- The option C(transformation) is only available for API version 300 or later.
- The option C(available_networks) is only available for API version 600 or later.
extends_documentation_fragment:
- oneview
- oneview.factsparams
'''
EXAMPLES = '''
- name: Gather facts about all Server Profile Templates
oneview_server_profile_template_facts:
config: "{{ config }}"
- debug: var=server_profile_templates
- name: Gather paginated, filtered and sorted facts about Server Profile Templates
oneview_server_profile_template_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 800
params:
start: 0
count: 3
sort: name:ascending
filter: macType='Virtual'
scope_uris: /rest/scopes/af62ae65-06b2-4aaf-94d3-6a92562888cf
delegate_to: localhost
- debug: var=server_profile_templates
- name: Gather facts about a Server Profile Template by name
oneview_server_profile_template_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 800
name: "ProfileTemplate101"
- name: Gather facts about a Server Profile by uri
oneview_server_profile_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 800
uri: /rest/server-profile-templates/c0868397-eff6-49ed-8151-4338702792d3
delegate_to: localhost
- name: Gather facts about a template and a profile with the configuration based on this template
oneview_server_profile_template_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 800
name: "ProfileTemplate101"
options:
- new_profile
- name: Gather facts about available networks.
oneview_server_profile_template_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 800
options:
- available_networks:
serverHardwareTypeUri: "/rest/server-hardware-types/253F1D49-0FEE-4DCD-B14C-B26234E9D414"
enclosureGroupUri: "/rest/enclosure-groups/293e8efe-c6b1-4783-bf88-2d35a8e49071"
delegate_to: localhost
'''
RETURN = '''
server_profile_templates:
description: Has all the OneView facts about the Server Profile Templates.
returned: Always, but can be null.
type: dict
new_profile:
description: A profile object with the configuration based on this template.
returned: When requested, but can be null.
type: dict
server_profile_template_available_networks:
description: Has all the facts about the list of Ethernet networks, Fibre Channel networks and network sets that
are available to the server profile along with their respective ports.
returned: When requested, but can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModule
class ServerProfileTemplateFactsModule(OneViewModule):
argument_spec = dict(
name=dict(type='str'),
options=dict(type='list'),
params=dict(type='dict'),
uri=dict(type='str')
)
def __init__(self):
super(ServerProfileTemplateFactsModule, self).__init__(additional_arg_spec=self.argument_spec)
self.set_resource_object(self.oneview_client.server_profile_templates)
def execute_module(self):
name = self.module.params.get("name")
uri = self.module.params.get("uri")
if name or uri:
facts = self.__get_options(name, uri)
elif self.options and self.options.get("available_networks"):
network_params = self.options["available_networks"]
facts = {"server_profile_template_available_networks": self.resource_client.get_available_networks(**network_params)}
else:
facts = self.__get_all()
return dict(changed=False, ansible_facts=facts)
def __get_options(self, name, uri):
if not name and uri:
self.current_resource = self.resource_client.get_by_uri(uri)
if not self.current_resource:
return dict(server_profile_templates=[])
facts = dict(server_profile_templates=[self.current_resource.data])
if self.options:
if "new_profile" in self.options:
facts["new_profile"] = self.current_resource.get_new_profile()
if "transformation" in self.options:
                transformation_data = self.options.get('transformation')
                facts["transformation"] = self.current_resource.get_transformation(**transformation_data)
return facts
def __get_all(self):
templates = self.resource_client.get_all(**self.facts_params)
return dict(server_profile_templates=templates)
def main():
ServerProfileTemplateFactsModule().run()
if __name__ == '__main__':
main()
|
from TAC_serialize import *
from tacgen import TACIndexer
from ctypes import c_int
#enum for type of object
class DataType:
Repr = ['Object', 'Int', 'Bool', 'String']
Object = 0
Int = 1
Bool = 2
String = 3
#enum for position on lattice
class DataLattice:
Repr = ['Bottom', 'Mid', 'Top']
Bottom = 0
Mid = 1
Top = 2
#struct for information about a virtual register
class DataInfo:
def __init__(self, dtype, dlat, args):
self.dtype = dtype
self.dlat = dlat
#args member contains different type depending on dtype
# for String, it is a string
# for Int, it is a two tuple (int lower bound, int upper bound)
# for Bool, it is True or False
# for Object, it is a two tuple (bool isvoid?, string strictest type)
#if dlat is Top or Bottom then this member is invalid
self.args = args
#if int, deal with overflow
if self.dtype == DataType.Int and self.dlat == DataLattice.Mid:
self.args = (c_int(self.args[0] & 0xffffffff).value, c_int(self.args[1] & 0xffffffff).value)
def copy(self):
argscpy = self.args
if isinstance(self.args, list) or isinstance(self.args, tuple):
argscpy = self.args[:]
return DataInfo(self.dtype, self.dlat, argscpy)
def __str__(self):
ret = '[' + DataType.Repr[self.dtype] + ', ' + DataLattice.Repr[self.dlat]
if self.dlat == DataLattice.Mid:
ret += ', '
if self.dtype == DataType.Object:
ret += '(' + ('Void' if self.args[0] else 'NonVoid') + ', ' + self.args[1] + ')'
else:
ret += str(self.args)
ret += ']'
return ret
#global mapping for const propagation of arguments to method calls
#TACCall -> lst of (TACRegister, DataInfo)
gCallArgs = {}
#global set of TACCalls that can be removed because constant propagation
gCallKill = {}
#apply constant propagation
def optCFG(cfg):
if not globalFlow(cfg):
#print 'NO CFG OPT\n===\n'
return cfg #dataflow failed to run in time
#update conditional branches if we know them ahead of time
for block in cfg.blocks:
lastins = block.last()
if isinstance(lastins, TACBT):
if block.dataOut[lastins.cond].dlat == DataLattice.Mid:
#branch is taken
if block.dataOut[lastins.cond].args:
block.instructions[-1] = TACJmp(lastins.label)
badblock = [b for b in block.children if not isinstance(b.first(), TACLabel) or b.first().name != lastins.label][0]
block.children = [b for b in block.children if b != badblock]
badblock.parents = [b for b in badblock.parents if b != block]
#branch not taken
else:
block.instructions = block.instructions[:-1]
badblock = [b for b in block.children if isinstance(b.first(), TACLabel) and b.first().name == lastins.label][0]
block.children = [b for b in block.children if b != badblock]
badblock.parents = [b for b in badblock.parents if b != block]
elif isinstance(lastins, TACBTypeEq):
if block.dataOut[lastins.obj].dlat == DataLattice.Mid:
if (block.dataOut[lastins.obj].dtype == DataType.Object and block.dataOut[lastins.obj].args[1] == lastins.dtype) \
or (block.dataOut[lastins.obj].dtype == DataType.Int and 'Int' == lastins.dtype) \
or (block.dataOut[lastins.obj].dtype == DataType.Bool and 'Bool' == lastins.dtype) \
or (block.dataOut[lastins.obj].dtype == DataType.String and 'String' == lastins.dtype):
block.instructions[-1] = TACJmp(lastins.label)
badblock = [b for b in block.children if not isinstance(b.first(), TACLabel) or b.first().name != lastins.label][0]
block.children = [b for b in block.children if b != badblock]
badblock.parents = [b for b in badblock.parents if b != block]
else:
block.instructions = block.instructions[:-1]
badblock = [b for b in block.children if isinstance(b.first(), TACLabel) and b.first().name == lastins.label][0]
block.children = [b for b in block.children if b != badblock]
badblock.parents = [b for b in badblock.parents if b != block]
#drop blocks that are unreachable TODO check for unreachable cycles
while True:
unreachable = [b for i, b in enumerate(cfg.blocks) if (i > 0 and len(b.parents) == 0) or len(b.instructions) == 0]
if len(unreachable) == 0:
break
for block in cfg.blocks:
block.parents = [b for b in block.parents if b not in unreachable]
cfg.blocks = [b for b in cfg.blocks if b not in unreachable]
#scan over args in TACCall instructions to replace with constants if possible
for block in cfg.blocks:
newinslst = block.instructions[:] #make shallow copy of instructions
for ins in block.instructions:
if not isinstance(ins, TACCall):
continue
#handle primitives through calls (i.e. string operations)
if ins in gCallKill:
ind = newinslst.index(ins)
if gCallKill[ins].dtype == DataType.Int:
newins = TACConstant(TACIndexer.returnReg, 'int', gCallKill[ins].args[0])
elif gCallKill[ins].dtype == DataType.Bool:
newins = TACConstant(TACIndexer.returnReg, 'bool', gCallKill[ins].args)
elif gCallKill[ins].dtype == DataType.String:
newins = TACConstant(TACIndexer.returnReg, 'string', gCallKill[ins].args)
newinslst = newinslst[:ind] + [newins] + newinslst[ind+1:] #take out call
continue
if ins not in gCallArgs:
continue
arglst = []
for arg in ins.args:
dinfo = [datum[1] for datum in gCallArgs[ins] if datum[0] == arg]
if len(dinfo) == 0:
continue
dinfo = dinfo[0]
if dinfo.dlat != DataLattice.Mid or dinfo.dtype == DataType.Object:
continue #only dealing with known primitives
if arg.boxed:
continue #primitives are boxed when they need to act as Objects
if dinfo.dtype == DataType.Int and dinfo.args[0] == dinfo.args[1]:
argins = TACConstant(arg, 'int', dinfo.args[0])
elif dinfo.dtype == DataType.Bool:
argins = TACConstant(arg, 'bool', dinfo.args)
elif dinfo.dtype == DataType.String:
argins = TACConstant(arg, 'string', dinfo.args)
arglst.append(argins)
ind = newinslst.index(ins)
newinslst = newinslst[:ind] + arglst + newinslst[ind:] #prepend constant instructions before call
block.instructions = newinslst
return cfg
def strSwitch(mstr, mode):
if mode:
mstr = mstr.replace('\\', '\\\\')
mstr = mstr.replace('\"', '\\\"')
else:
mstr = mstr.replace('\\\\', '\\')
mstr = mstr.replace('\\\"', '\"')
return mstr
#peephole opt on TAC instructions
#remove jmp and labels when they are next to each other
def cullLabels(cfg):
labelref = {}
inslst = cfg.toList()
#count the number of references to a label
for ins in inslst:
if isinstance(ins, TACBT) or isinstance(ins, TACJmp) or isinstance(ins, TACBTypeEq):
if ins.label not in labelref:
labelref[ins.label] = 0
labelref[ins.label] += 1
elif isinstance(ins, TACLabel):
if ins.name not in labelref:
labelref[ins.name] = 0
rmlst = []
#keep track of adjacent jump/label instructions when the label is only referenced by that jump
for i in range(len(inslst)):
if i == 0: #do not kill method start label
continue
if (isinstance(inslst[i-1], TACJmp) or isinstance(inslst[i-1], TACBT) or isinstance(ins, TACBTypeEq))\
and isinstance(inslst[i], TACLabel) and inslst[i-1].label == inslst[i].name and labelref[inslst[i].name] == 1:
rmlst.append(i-1)
rmlst.append(i)
elif isinstance(inslst[i], TACLabel) and labelref[inslst[i].name] == 0:
rmlst.append(i)
inslst = [ins for i, ins in enumerate(inslst) if i not in rmlst]
return _constructCFG(inslst)
#continue propagating information between basic blocks until fixed point is reached
def globalFlow(cfg):
for block in cfg.blocks:
vregs = enumerateReg(block)
initData = {}
for vreg in vregs:
initData[vreg] = DataInfo(DataType.Object, DataLattice.Bottom, None)
block.dataIn = initData
block.dataOut = initData
changed = True
i = 0
while changed:
changed = False
for block in cfg.blocks:
lstIn = [block.dataIn]
for parent in block.parents:
lstIn.append(parent.dataOut)
block.dataIn = join(lstIn)
changed = changed or forwardFlow(block)
i += 1
#ideally we reach a fixed point but for compilation speed we eventually give up
if i > 100:
return False
return True
#modifies block.dataOut in place and returns true if a change was made
def forwardFlow(block):
dataOut = dataCopy(block.dataOut)
mdata = dataCopy(block.dataIn)
for ins in block.instructions:
mdata = transfer(ins, mdata)
block.dataOut = mdata
return not dataCompare(dataOut, block.dataOut)
#returns a data dict corresponding to addition of information by TAC ins
#return value is the parameter modified in place
def transfer(ins, data):
dataOut = data
if isinstance(ins, TACOp2):
if ins.op1 not in data or ins.op2 not in data or data[ins.op1].dlat != DataLattice.Mid or data[ins.op2].dlat != DataLattice.Mid:
dataOut[ins.assignee] = DataInfo(DataType.Object, DataLattice.Top, None)
return dataOut
if ins.opcode == '+':
dataOut[ins.assignee] = DataInfo(DataType.Int, DataLattice.Mid, \
(data[ins.op2].args[0] + data[ins.op1].args[0], data[ins.op2].args[1] + data[ins.op1].args[1]))
elif ins.opcode == '-':
dataOut[ins.assignee] = DataInfo(DataType.Int, DataLattice.Mid, \
(data[ins.op2].args[0] - data[ins.op1].args[0], data[ins.op2].args[1] - data[ins.op1].args[1]))
elif ins.opcode == '*':
dataOut[ins.assignee] = DataInfo(DataType.Int, DataLattice.Mid, \
(data[ins.op2].args[0] * data[ins.op1].args[0], data[ins.op2].args[1] * data[ins.op1].args[1]))
elif ins.opcode == '/':
if data[ins.op1].args[0] == 0 or data[ins.op1].args[1] == 0:
dataOut[ins.assignee] = DataInfo(DataType.Object, DataLattice.Top, None)
return dataOut
else:
                    #use float division then int() so the result truncates toward zero (Python's // floors toward negative infinity)
dataOut[ins.assignee] = DataInfo(DataType.Int, DataLattice.Mid, \
(int(float(data[ins.op2].args[0]) / data[ins.op1].args[0]), int(float(data[ins.op2].args[1]) / data[ins.op1].args[1])))
elif ins.opcode == '<':
if data[ins.op1].dtype == DataType.Int and data[ins.op2].dtype == DataType.Int:
#if upper bound of op1 is less than lower bound of op2 then certainly true
if data[ins.op1].args[1] < data[ins.op2].args[0]:
dataOut[ins.assignee] = DataInfo(DataType.Bool, DataLattice.Mid, True)
#if lower bound of op1 is greater than upper bound of op2 then certainly false
elif data[ins.op1].args[0] > data[ins.op2].args[1]:
dataOut[ins.assignee] = DataInfo(DataType.Bool, DataLattice.Mid, False)
#otherwise we do not know
else:
dataOut[ins.assignee] = DataInfo(DataType.Bool, DataLattice.Top, None)
elif data[ins.op1].dtype == DataType.Bool and data[ins.op2].dtype == DataType.Bool:
if data[ins.op1].args == False and data[ins.op2].args == True:
dataOut[ins.assignee] = DataInfo(DataType.Bool, DataLattice.Mid, True)
else:
dataOut[ins.assignee] = DataInfo(DataType.Bool, DataLattice.Mid, False)
elif data[ins.op1].dtype == DataType.String and data[ins.op2].dtype == DataType.String:
dataOut[ins.assignee] = DataInfo(DataType.Bool, DataLattice.Mid, data[ins.op1].args < data[ins.op2].args)
else:
#better Object lattice might be able to do compare
dataOut[ins.assignee] = DataInfo(DataType.Bool, DataLattice.Top, None)
elif ins.opcode == '<=':
if data[ins.op1].dtype == DataType.Int and data[ins.op2].dtype == DataType.Int:
if data[ins.op1].args[1] <= data[ins.op2].args[0]:
dataOut[ins.assignee] = DataInfo(DataType.Bool, DataLattice.Mid, True)
elif data[ins.op1].args[0] > data[ins.op2].args[1]:
dataOut[ins.assignee] = DataInfo(DataType.Bool, DataLattice.Mid, False)
else:
dataOut[ins.assignee] = DataInfo(DataType.Bool, DataLattice.Top, False)
elif data[ins.op1].dtype == DataType.Bool and data[ins.op2].dtype == DataType.Bool:
if (data[ins.op1].args == False and data[ins.op2].args == True) or data[ins.op1].args == data[ins.op2].args:
dataOut[ins.assignee] = DataInfo(DataType.Bool, DataLattice.Mid, True)
else:
dataOut[ins.assignee] = DataInfo(DataType.Bool, DataLattice.Mid, False)
elif data[ins.op1].dtype == DataType.String and data[ins.op2].dtype == DataType.String:
dataOut[ins.assignee] = DataInfo(DataType.Bool, DataLattice.Mid, data[ins.op1].args <= data[ins.op2].args)
else:
dataOut[ins.assignee] = DataInfo(DataType.Bool, DataLattice.Top, None)
elif ins.opcode == '=':
if data[ins.op1].dtype == DataType.Int and data[ins.op2].dtype == DataType.Int:
if data[ins.op1].args == data[ins.op2].args:
dataOut[ins.assignee] = DataInfo(DataType.Bool, DataLattice.Mid, True)
#if both ops have a definite number and they are not the same then certainly false
elif data[ins.op1].args[0] == data[ins.op1].args[1] and data[ins.op2].args[0] == data[ins.op2].args[1] \
and data[ins.op1].args != data[ins.op2].args:
dataOut[ins.assignee] = DataInfo(DataType.Bool, DataLattice.Mid, False)
else:
dataOut[ins.assignee] = DataInfo(DataType.Bool, DataLattice.Top, False)
elif data[ins.op1].dtype == DataType.Bool and data[ins.op2].dtype == DataType.Bool:
if data[ins.op1].args == data[ins.op2].args:
dataOut[ins.assignee] = DataInfo(DataType.Bool, DataLattice.Mid, True)
else:
dataOut[ins.assignee] = DataInfo(DataType.Bool, DataLattice.Mid, False)
elif data[ins.op1].dtype == DataType.String and data[ins.op2].dtype == DataType.String:
dataOut[ins.assignee] = DataInfo(DataType.Bool, DataLattice.Mid, data[ins.op1].args == data[ins.op2].args)
else:
dataOut[ins.assignee] = DataInfo(DataType.Bool, DataLattice.Top, False)
#correct the bounds if the lower bound is greater than the upper
if dataOut[ins.assignee].dtype == DataType.Int and dataOut[ins.assignee].args[0] > dataOut[ins.assignee].args[1]:
dataOut[ins.assignee].args = (dataOut[ins.assignee].args[1], dataOut[ins.assignee].args[0])
elif isinstance(ins, TACOp1):
if ins.op1 not in data or data[ins.op1].dlat != DataLattice.Mid:
dataOut[ins.assignee] = DataInfo(DataType.Object, DataLattice.Top, None)
return dataOut
if ins.opcode == 'not':
dataOut[ins.assignee] = DataInfo(DataType.Bool, DataLattice.Mid, not data[ins.op1].args)
if ins.opcode == '~':
dataOut[ins.assignee] = DataInfo(DataType.Int, DataLattice.Mid, (-data[ins.op1].args[1], -data[ins.op1].args[0]))
if ins.opcode == 'isvoid':
if data[ins.op1].dtype in [DataType.Int, DataType.Bool, DataType.String]:
dataOut[ins.assignee] = DataInfo(DataType.Bool, DataLattice.Mid, False)
else:
dataOut[ins.assignee] = DataInfo(DataType.Bool, DataLattice.Mid, data[ins.op1].args[0])
elif isinstance(ins, TACAllocate):
if ins.ptype == 'Int':
dataOut[ins.assignee] = DataInfo(DataType.Int, DataLattice.Mid, (0, 0))
elif ins.ptype == 'Bool':
dataOut[ins.assignee] = DataInfo(DataType.Bool, DataLattice.Mid, False)
elif ins.ptype == 'String':
dataOut[ins.assignee] = DataInfo(DataType.String, DataLattice.Mid, '')
elif ins.allop == 'default':
dataOut[ins.assignee] = DataInfo(DataType.Object, DataLattice.Mid, (True, ins.ptype))
else: #new
dataOut[ins.assignee] = DataInfo(DataType.Object, DataLattice.Mid, (False, ins.ptype))
elif isinstance(ins, TACCall):
#precompute string methods
success = False
if not isinstance(ins.funcname, TACRegister) and ins.funcname[:ins.funcname.index('.')] == 'String':
mname = ins.funcname[ins.funcname.index('.')+1:]
if mname == 'substr':
if ins.args[0] in data and ins.args[1] in data and ins.args[2] in data and \
data[ins.args[0]].dlat == DataLattice.Mid and data[ins.args[1]].dlat == DataLattice.Mid and \
data[ins.args[2]].dlat == DataLattice.Mid:
istr = strSwitch(data[ins.args[0]].args, False)
start = data[ins.args[1]].args
length = data[ins.args[2]].args
if start[0]==start[1] and length[0]==length[1] and start[0] >= 0 and start[1] + length[1] <= len(istr):
dataOut[ins.assignee] = DataInfo(DataType.String, DataLattice.Mid,
strSwitch(istr[start[0]:start[0]+length[0]], True))
success = True
elif mname == 'concat':
if ins.args[0] in data and ins.args[1] in data and \
data[ins.args[0]].dlat == DataLattice.Mid and data[ins.args[1]].dlat == DataLattice.Mid:
istr = strSwitch(data[ins.args[0]].args, False)
cstr = strSwitch(data[ins.args[1]].args, False)
dataOut[ins.assignee] = DataInfo(DataType.String, DataLattice.Mid, strSwitch(istr + cstr, True))
success = True
elif mname == 'length':
if ins.args[0] in data and data[ins.args[0]].dlat == DataLattice.Mid:
istr = strSwitch(data[ins.args[0]].args, False)
dataOut[ins.assignee] = DataInfo(DataType.Int, DataLattice.Mid, (len(istr), len(istr)))
success = True
if not success:
dataOut[ins.assignee] = DataInfo(DataType.Object, DataLattice.Top, None)
#save information about arguments for possible constant propagation
gCallArgs[ins] = []
for arg in ins.args:
if arg in dataOut:
gCallArgs[ins].append((arg, dataOut[arg]))
else:
gCallKill[ins] = dataOut[ins.assignee] #save info for constant propagation
elif isinstance(ins, TACConstant):
if ins.ptype == 'int':
dataOut[ins.assignee] = DataInfo(DataType.Int, DataLattice.Mid, (int(ins.const), int(ins.const)))
elif ins.ptype == 'bool':
dataOut[ins.assignee] = DataInfo(DataType.Bool, DataLattice.Mid, str(ins.const) == 'true')
elif ins.ptype == 'string':
dataOut[ins.assignee] = DataInfo(DataType.String, DataLattice.Mid, ins.const)
elif isinstance(ins, TACAssign):
if ins.assignor not in data or isinstance(ins.assignor, TACClassAttr) or isinstance(ins.assignor, TACMethodArg):
dataOut[ins.assignee] = DataInfo(DataType.Object, DataLattice.Top, None)
else:
if isinstance(ins.assignee, TACRegister):
dataOut[ins.assignee] = data[ins.assignor].copy()
elif isinstance(ins.assignee, TACClassAttr):
dataOut[ins.assignee.reg] = DataInfo(DataType.Object, DataLattice.Top, None)
dataOut[ins.assignee] = DataInfo(DataType.Object, DataLattice.Top, None)
"""
#if we are assigning to a boxed Int/Bool's val we mark the boxed reg
if ins.assignee.cname == 'Int' and ins.assignee.aname == 'val':
dataOut[ins.assignee.reg] = data[ins.assignor].copy()
elif ins.assignee.cname == 'Bool' and ins.assignee.aname == 'val':
if data[ins.assignor].dtype == DataType.Int and data[ins.assignor].dlat == DataLattice.Mid:
dataOut[ins.assignee.reg] = DataInfo(DataType.Bool, DataLattice.Mid, 'true' if data[ins.assignor].args != 0 else 'false')
else:
dataOut[ins.assignee.reg] = data[ins.assignor].copy()
else:
dataOut[ins.assignee.reg] = DataInfo(DataType.Object, DataLattice.Top, None)
"""
return dataOut
#joins multiple data dicts from multiple incoming edges of cfg
def join(lstdata):
vregs = set()
for data in lstdata:
for key in data:
vregs.add(key)
joined = {}
for vreg in vregs:
joined[vreg] = DataInfo(DataType.Object, DataLattice.Bottom, None)
for data in lstdata:
if vreg in data:
joined[vreg] = dataLub(joined[vreg], data[vreg])
return joined
#least upper bound function for DataInfo
def dataLub(a, b):
#if one is bottom return the other
if a.dlat == DataLattice.Bottom:
return b
if b.dlat == DataLattice.Bottom:
return a
#if one is top then return it
if a.dlat == DataLattice.Top:
return a
if b.dlat == DataLattice.Top:
return b
#must be Mid
#if the two are of incompatible types return Top
if a.dtype != b.dtype:
return DataInfo(DataType.Object, DataLattice.Top, None)
#if Int then widen the range of args
if a.dtype == DataType.Int:
        #NOTE range widening is omitted because it would never terminate on while loops
        #TODO decide when to widen the range in args (e.g. when not inside a loop)
#return DataInfo(DataType.Int, DataLattice.Mid, \
# (a.args[0] if a.args[0] < b.args[0] else b.args[0], a.args[1] if a.args[1] > b.args[1] else b.args[1]))
if a.args == b.args:
return a
else:
return DataInfo(DataType.Int, DataLattice.Top, None)
#if Bool they must be the same to avoid Top
if a.dtype == DataType.Bool:
if a.args == b.args:
return a
else:
return DataInfo(DataType.Bool, DataLattice.Top, None)
#if String they must be the same to avoid Top
if a.dtype == DataType.String:
if a.args == b.args:
return a
else:
return DataInfo(DataType.String, DataLattice.Top, None)
#must be Object
#compare void/nonvoid and type
if a.args == b.args:
return a
else:
return DataInfo(DataType.Object, DataLattice.Top, None)
#returns a copy of data dict - keys are same, but data is duplicate
def dataCopy(dataIn):
dataOut = {}
for d in dataIn:
dataOut[d] = dataIn[d].copy()
return dataOut
#return true if data dicts a and b are equivalent
def dataCompare(a, b):
for key in a:
if key not in b:
return False
if a[key].dlat != b[key].dlat:
return False
if a[key].dlat == DataLattice.Mid:
if a[key].args != b[key].args or a[key].dtype != b[key].dtype:
return False
for key in b:
if key not in a:
return False
return True
def enumerateReg(block):
vregs = set()
for ins in block.instructions:
if isinstance(ins, TACOp):
vregs.add(ins.op1)
if isinstance(ins, TACOp2):
vregs.add(ins.op2)
if isinstance(ins, TACAssign):
vregs.add(ins.assignor)
if hasattr(ins, 'assignee'):
vregs.add(ins.assignee)
return vregs
import sys
import tacgen
from TAC_serialize import _constructCFG
from tacgen import TACIndexer
if __name__ == '__main__':
cmap, imap, pmap, ast = tacgen.readClType(sys.argv[1])
TACIndexer.setTypeMaps(cmap, imap, pmap)
tacgen.attrConvert(ast)
tacgen.implConvert(ast)
taclist = TACIndexer.inslst
methlist = []
lastInd = 0
for i in range(len(taclist)):
# Split taclist on method labels
if isinstance(taclist[i], TACLabel) and taclist[i].name[0] != '.':
if i != 0:
methlist.append(taclist[lastInd:i])
lastInd = i
methlist.append(taclist[lastInd:])
for meth in methlist:
cfg = _constructCFG(meth)
globalFlow(cfg)
print cfg
print cfg.verbosestr()
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Pass-ball task 1 state machine:
# Flow: robot moves to the passing zone and lowers the shovel -> passes the ball -> advances to the ball-search position -> starts detecting the ball (by rotating in place) -> once a ball is detected, approaches to 0.8 m in front of it
#  -> detects the ball again, adjusts the shovel direction and advances -> scoops the ball -> adjusts the robot heading toward the passing zone -> shoots the ball -> raises the shovel -> returns home
import rospy
import smach
import math
import smach_ros
from robot_state_class.robot_state_class import *
def pass_first():
pass_ball_first = smach.StateMachine(outcomes=['succeed', 'failed'])
rospy.logwarn("pass_ball_first STARTING!!!")
with pass_ball_first:
smach.StateMachine.add('Start', OutHome(),
transitions={'succeed':'MoveToPass',
'failed':'failed'})
MoveToPass = smach.Concurrence(outcomes=['succeed','failed'],
default_outcome='failed',
outcome_map={'succeed':{'ShovelUp1':'succeed',
'MoveToMidline1':'succeed'}})
with MoveToPass:
smach.Concurrence.add('ShovelUp1', ShovelUp())
smach.Concurrence.add('MoveToMidline1', MoveToMidlinePass())
smach.StateMachine.add('MoveToPass', MoveToPass,
transitions={'succeed':'SHOOT1',
'failed':'failed'})
smach.StateMachine.add('SHOOT1',Shoot(),
transitions={'succeed':'MoveToSearch',
'failed':'failed'})
smach.StateMachine.add('MoveToSearch', MoveToMidlineSearch(),
transitions={'succeed':'FindBall',
'failed':'failed'})
smach.StateMachine.add('FindBall', MidlineSearchBasketball(),
transitions={'succeed': 'Shovel_Down',
'failed': 'Return'})
smach.StateMachine.add('Shovel_Down', ShovelUp(),
transitions={'succeed': 'ADJUST2',
'failed':'failed'})
smach.StateMachine.add('ADJUST2',MoveToMidlinePass(),
transitions={'succeed':'SHOOT2',
'failed':'failed'})
smach.StateMachine.add('SHOOT2',Shoot(),
transitions={'succeed':'Return',
'failed':'failed'})
smach.StateMachine.add('Return', ReturnHome(),
transitions={'succeed':'succeed',
'failed':'failed'})
pass_ball_first.execute()
rospy.logerr('Pass_Ball_First has finished!!!!')
if __name__ == '__main__':
rospy.init_node('passBall_first')
rospy.sleep(19.9)
start_time = rospy.get_time()
pass_first()
end_time = rospy.get_time()
print (end_time - start_time)
|
"""Contains core classes for planners
Code structures are borrowed from simple_rl(https://github.com/david-abel/simple_rl)
"""
from abc import ABC, abstractmethod
class Planner(ABC):
def __init__(self, name: str):
self._name = name
@property
def name(self):
return self._name
@abstractmethod
def plan(self, state, horizon):
pass
@abstractmethod
def policy(self, state):
pass
def __str__(self):
return self._name
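# Hedged sketch, not part of the original module: a minimal concrete Planner showing
# how the abstract interface is meant to be filled in. "RandomPlanner" and its
# "actions" argument are hypothetical; a real planner would implement plan() and
# policy() against an actual environment/MDP model.
import random
class RandomPlanner(Planner):
    def __init__(self, actions, name: str = "random"):
        super().__init__(name)
        self._actions = list(actions)
    def plan(self, state, horizon):
        # return a horizon-length sequence of uniformly random actions
        return [random.choice(self._actions) for _ in range(horizon)]
    def policy(self, state):
        # stateless uniform-random policy
        return random.choice(self._actions)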
|
#!/usr/bin/env python
from typing import Union
import numpy as np
import mapel.roommates.models.euclidean as euclidean
import mapel.roommates.models.impartial as impartial
import mapel.roommates.models.mallows as mallows
import mapel.roommates.models.urn as urn
import mapel.roommates.models.group_separable as group_separable
def generate_votes(model_id: str = None, num_agents: int = None,
params: dict = None) -> Union[list, np.ndarray]:
main_models_with_params = {
'roommates_norm-mallows': mallows.generate_roommates_norm_mallows_votes,
'roommates_urn': urn.generate_roommates_urn_votes,
'roommates_euclidean': euclidean.generate_roommates_euclidean_votes,
'roommates_reverse_euclidean': euclidean.generate_roommates_reverse_euclidean_votes,
'roommates_gs': group_separable.generate_roommates_gs_votes,
'roommates_radius': euclidean.generate_roommates_radius_votes,
'roommates_double': euclidean.generate_roommates_double_votes,
'roommates_mallows_euclidean': euclidean.generate_roommates_mallows_euclidean_votes,
'roommates_vectors': euclidean.generate_roommates_vectors_votes,
'roommates_malasym': mallows.generate_roommates_malasym_votes,
'roommates_group_ic': impartial.generate_roommates_group_ic_votes,
}
main_models_without_params = {
'roommates_ic': impartial.generate_roommates_ic_votes,
'roommates_id': impartial.generate_roommates_id_votes,
'roommates_chaos': impartial.generate_roommates_chaos_votes,
'roommates_symmetric': impartial.generate_roommates_symmetric_votes,
'roommates_asymmetric': impartial.generate_roommates_asymmetric_votes,
}
if model_id in main_models_with_params:
return main_models_with_params.get(model_id)(num_agents=num_agents, params=params)
if model_id in main_models_without_params:
return main_models_without_params.get(model_id)(num_agents=num_agents)
else:
print("No such election model_id!", model_id)
return []
# # # # # # # # # # # # # # # #
# LAST CLEANUP ON: 16.03.2022 #
# # # # # # # # # # # # # # # #
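# Hedged usage sketch (not part of the original module): generating one instance.
# votes = generate_votes(model_id='roommates_ic', num_agents=8)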
|
#!/opt/bin/lv_micropython -i
# change the above to the path of your lv_micropython -i unix binary
#
import time
#
# initialize lvgl
#
import lvgl as lv
import display_driver
from lv_colors import lv_colors
style = lv.style_t()
style.init()
# Set a background color and a radius
style.set_radius(lv.STATE.DEFAULT, 5)
style.set_bg_opa(lv.STATE.DEFAULT, lv.OPA.COVER)
style.set_bg_color(lv.STATE.DEFAULT, lv_colors.SILVER)
# Add a shadow
style.set_shadow_width(lv.STATE.DEFAULT, 8)
style.set_shadow_color(lv.STATE.DEFAULT, lv_colors.BLUE)
style.set_shadow_ofs_x(lv.STATE.DEFAULT, 10)
style.set_shadow_ofs_y(lv.STATE.DEFAULT, 20)
# Create an object with the new style
obj = lv.obj(lv.scr_act(), None)
obj.add_style(lv.obj.PART.MAIN, style)
obj.align(None, lv.ALIGN.CENTER, 0, 0)
|
# Copyright (c) 2010-2012 Massachusetts Institute of Technology.
# MIT License (cf. MIT-LICENSE.txt or http://www.opensource.org/licenses/mit-license.php)
from django.conf.urls.defaults import *
urlpatterns = patterns('img.views',
url(r'^js/textareas/(?P<name>.+)/$', 'textareas_js', name='tinymce-js'),
|
# create an algorithm that reads a number
# and shows its double, its triple and its square root
n = int(input('Digite um número: '))
d = n * 2
t = n * 3
r = n ** (1/2)
print('O dobro de {} é {}, o triplo é {} e a raíz quadarada é {:.3f}'.format(n,d,t,r))
|
#!/usr/bin/env python3
import logging as log
from lxml import etree
import copy
import math as m
from enum import Enum
import converter
from objects import *
import gazebo_objects as go
# Files pathes
SAMPLE_BOX_PATH = "models/box.sdf"
SAMPLE_LINE_PATH = "models/line.sdf"
TRAFFIC_LIGHT_PATH = "model://traffic-light"
EMPTY_WORLD_PATH = "models/empty_world.world"
# Signs materials
class SignsModels(Enum):
STOP = "model://brick-sign"
ONLY_FORWARD = "model://forward-sign"
ONLY_RIGHT = "model://right-sign"
ONLY_LEFT = "model://left-sign"
FORWARD_OR_RIGHT = "model://forward-right-sign"
FORWARD_OR_LEFT = "model://forward-left-sign"
class WorldCreator:
# Variables:
box_counter = 0
sign_counter = 0
traffic_light_counter = 0
def __init__(self, map_params: MapParams):
"""
@brief Constructor that create empty world with defined config
"""
self.__create_empty_world()
self.map_params = map_params
def showTree(self):
"""
@brief Print on console xml tree of current world
"""
log.debug(etree.tostring(self.SDF_ROOT, pretty_print=True))
def writeWorldToFile(self, fileName):
"""
@brief Write current world to file
"""
with open(fileName, 'wb') as f:
f.write(etree.tostring(self.SDF_ROOT, pretty_print=True))
def addObject(self, obj: Object):
FUNCTIONS_MAPPING = {
ObjectType.WALL: self.__addWall,
ObjectType.SIGN: self.__addSign,
ObjectType.BOX: self.__addBox,
ObjectType.SQUARE: self.__addSquare,
ObjectType.TRAFFIC_LIGHT: self.__addTrafficLight
}
if obj.TYPE not in FUNCTIONS_MAPPING:
log.error('Object type {} is not supported in WORLD generation'.format(obj.TYPE.name))
return
FUNCTIONS_MAPPING[obj.TYPE](obj)
def __addWall(self, wall):
gz_wall = go.GazeboWall(wall, self.map_params)
pos_str = gz_wall.get_position_str()
size_str = gz_wall.get_size_str()
self.__spawnBox(pos_str, size_str)
def __addBox(self, box):
gz_box = go.GazeboBox(box, self.map_params)
pos_str = gz_box.get_position_str()
size_str = gz_box.get_size_str()
self.__spawnBox(pos_str, size_str)
def __addSquare(self, square):
gz_square = go.GazeboSquare(square, self.map_params)
size_str = gz_square.get_size_str()
pos_strs = gz_square.get_position_strs()
for pos_str in pos_strs:
self.__spawnBox(pos_str, size_str)
def __spawnBox(self, pos_str, size_str):
self.box_counter += 1
box_root = etree.parse(SAMPLE_BOX_PATH).getroot()
box_root.set("name", "box_{}".format(self.box_counter))
box_root.find("pose").text = pos_str
link = box_root.find("link")
link.find("collision").find("geometry").find("box").find("size").text = size_str
link.find("visual").find("geometry").find("box").find("size").text = size_str
self.SDF_ROOT.find("world").insert(0, box_root)
def __addSign(self, sign):
gz_sign = go.GazeboSign(sign, self.map_params)
pos_str = gz_sign.get_position_str()
_type = gz_sign.get_type()
SIGN_MODEL_MAP = {
SignsTypes.STOP.value: SignsModels.STOP,
SignsTypes.ONLY_FORWARD.value: SignsModels.ONLY_FORWARD,
SignsTypes.ONLY_LEFT.value: SignsModels.ONLY_LEFT,
SignsTypes.ONLY_RIGHT.value: SignsModels.ONLY_RIGHT,
SignsTypes.FORWARD_OR_LEFT.value: SignsModels.FORWARD_OR_LEFT,
SignsTypes.FORWARD_OR_RIGHT.value: SignsModels.FORWARD_OR_RIGHT,
}
if _type not in SIGN_MODEL_MAP:
log.error("Error: sign type \'{}\' is not supported".format(_type))
return
self.__spawnSign(pos_str, SIGN_MODEL_MAP[_type])
def __spawnSign(self, pos_str, model_path):
### LEFT/RIGHT_BOT/TOP - in terms of rendered map
log.debug("sign with pos: {} / {}".format(pos_str, model_path))
sign_root = etree.Element("include")
uri_elem = etree.Element("uri")
uri_elem.text = model_path.value
name_elem = etree.Element("name")
name_elem.text = "sign_{}".format(self.sign_counter)
pose_elem = etree.Element("pose")
pose_elem.text = pos_str
sign_root.append(uri_elem)
sign_root.append(name_elem)
sign_root.append(pose_elem)
self.SDF_ROOT.find("world").insert(0, sign_root)
self.sign_counter += 1
def __addTrafficLight(self, trafficLight):
go_traf_light = go.GazeboTrafficLight(trafficLight, self.map_params)
pos_str = go_traf_light.get_position_str()
self.__spawnTrafficLight(pos_str)
line_pos_str = go_traf_light.get_line_position_str()
line_size_str = go_traf_light.get_line_size_str()
self.__spawnTrafficLightLine(line_pos_str, line_size_str)
self.traffic_light_counter += 1
def __spawnTrafficLight(self, pos_str):
log.debug("traffic light with pos: {}".format(pos_str))
model_path = TRAFFIC_LIGHT_PATH
counter = self.traffic_light_counter
model_name = "traffic_light"
model_root = etree.Element("include")
uri_elem = etree.Element("uri")
uri_elem.text = model_path
name_elem = etree.Element("name")
name_elem.text = "{}_{}".format(model_name, counter)
pose_elem = etree.Element("pose")
pose_elem.text = pos_str
model_root.append(uri_elem)
model_root.append(name_elem)
model_root.append(pose_elem)
self.SDF_ROOT.find("world").insert(0, model_root)
def __spawnTrafficLightLine(self, pos_str, size_str):
log.debug("traffic light line with pos: {}".format(pos_str))
line_root = etree.parse(SAMPLE_LINE_PATH).getroot()
line_root.set("name", "tl_line_{}".format(self.traffic_light_counter))
line_root.find("pose").text = pos_str
link = line_root.find("link")
link.find("collision").find("geometry").find("plane").find("size").text = size_str
link.find("visual").find("geometry").find("plane").find("size").text = size_str
self.SDF_ROOT.find("world").insert(0, line_root)
def __create_empty_world(self):
"""
@brief Create sdf tree for empty world from file
"""
self.SDF_ROOT = etree.parse(EMPTY_WORLD_PATH).getroot()
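# Hedged usage sketch (not in the original file): a typical generation flow, assuming
# MapParams and the Object subclasses come from the imported "objects" module and the
# sample .sdf/.world files listed above exist relative to the working directory.
# creator = WorldCreator(map_params)
# for obj in parsed_objects:  # e.g. walls, boxes, signs parsed from a map description
#     creator.addObject(obj)
# creator.writeWorldToFile("generated.world")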
|
# Supplementary classes and functions for ENGSCI233 notebook Data.ipynb
# author: David Dempsey
from matplotlib import pyplot as plt
import numpy as np
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
# Module 1: Data
def show_list(ll, ax, highlight=None, label=None, **kwargs):
ax.set_xlim([0,1])
ax.set_ylim([0,1])
ax.axis('off')
# check whether popped given
plot_popped = 'popped' in kwargs.keys()
# width of item
N = ll.get_length()
if plot_popped:
N += 1
w = 1./(2*N+3)
y0,y1 = [0.2,0.8]
# draw items
w0 = w
# print popped value first, if present
if plot_popped:
# draw box
poly = np.array([[w0,w0+w,w0+w,w0,w0],[y0,y0,y1,y1,y0]]).T
polygon = Polygon(poly, zorder=1)
p = PatchCollection([polygon,], facecolor = 'm', edgecolor = 'k', alpha = 0.3)
ax.add_collection(p)
# add text
ax.text(w0+0.5*w, 0.5*(y0+y1), '{}'.format(kwargs['popped']), ha = 'center', va='center')
w0 += 2*w
N = N-1
for i in range(N):
# draw box
poly = np.array([[w0,w0+w,w0+w,w0,w0],[y0,y0,y1,y1,y0]]).T
polygon = Polygon(poly, zorder=1)
if i == highlight:
col = 'g'
elif i == 0:
col = 'r'
else:
col = 'b'
p = PatchCollection([polygon,], facecolor = col, edgecolor = 'k', alpha = 0.3)
ax.add_collection(p)
# add text
ax.text(w0+0.5*w, 0.5*(y0+y1), '{}'.format(ll.get_value(i)), ha = 'center', va='center')
w0 += w
# draw arrow
col = [0.5,0.5,0.5]
if highlight is not None:
if i < highlight:
col = 'k'
ax.arrow(w0, 0.5*(y0+y1), w, 0, length_includes_head=True, head_length = 0.01, head_width = 0.1, color = col)
w0 += w
# draw null
poly = np.array([[w0,w0+w,w0+w,w0,w0],[y0,y0,y1,y1,y0]]).T
polygon = Polygon(poly, zorder=1)
p = PatchCollection([polygon,], facecolor = 'w', edgecolor = 'k', linestyle = '--')
ax.add_collection(p)
ax.text(w0+0.5*w, 0.5*(y0+y1), 'None', ha = 'center', va='center')
# add label
if label is not None:
ax.text(w0+1.5*w, 0.5*(y0+y1), label, ha = 'left', va='center')
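# Hedged usage sketch (not part of the original module): show_list expects a linked-list
# object exposing get_length() and get_value(i); "ll" below stands for such an object.
# fig, ax = plt.subplots()
# show_list(ll, ax, highlight=1, label='after insert')
# plt.show()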
|
import sys
import numpy as np
import torch
from pytorch_fid.inception import InceptionV3
sys.path.insert(0, '/workspace')
from datasets.custom_subset import SingleClassSubset
from utils.stylegan import create_image
class PRCD:
def __init__(self, dataset_real, dataset_fake, device, crop_size=None, generator=None, batch_size=128, dims=2048, num_workers=16, gpu_devices=[]):
self.dataset_real = dataset_real
self.dataset_fake = dataset_fake
self.batch_size = batch_size
self.dims = dims
self.num_workers = num_workers
self.device = device
self.generator = generator
self.crop_size = crop_size
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[self.dims]
inception_model = InceptionV3([block_idx])
if len(gpu_devices) > 1:
self.inception_model = torch.nn.DataParallel(inception_model, device_ids=gpu_devices)
else:
self.inception_model = inception_model
self.inception_model.to(self.device)
def compute_metric(self, num_classes, k=3, rtpt=None):
precision_list = []
recall_list = []
density_list = []
coverage_list = []
for step, cls in enumerate(range(num_classes)):
with torch.no_grad():
embedding_fake = self.compute_embedding(self.dataset_fake, cls)
embedding_real = self.compute_embedding(self.dataset_real, cls)
pair_dist_real = torch.cdist(embedding_real, embedding_real, p=2)
pair_dist_real = torch.sort(pair_dist_real, dim=1, descending=False)[0]
pair_dist_fake = torch.cdist(embedding_fake, embedding_fake, p=2)
pair_dist_fake = torch.sort(pair_dist_fake, dim=1, descending=False)[0]
radius_real = pair_dist_real[:, k]
radius_fake = pair_dist_fake[:, k]
# Compute precision
distances_fake_to_real = torch.cdist(embedding_fake, embedding_real, p=2)
min_dist_fake_to_real, nn_real = distances_fake_to_real.min(dim=1)
precision = (min_dist_fake_to_real <= radius_real[nn_real]).float().mean()
precision_list.append(precision.cpu().item())
# Compute recall
distances_real_to_fake = torch.cdist(embedding_real, embedding_fake, p=2)
min_dist_real_to_fake, nn_fake = distances_real_to_fake.min(dim=1)
recall = (min_dist_real_to_fake <= radius_fake[nn_fake]).float().mean()
recall_list.append(recall.cpu().item())
# Compute density
num_samples = distances_fake_to_real.shape[0]
sphere_counter = (distances_fake_to_real <= radius_real.repeat(num_samples, 1)).float().sum(dim=0).mean()
density = sphere_counter / k
density_list.append(density.cpu().item())
# Compute coverage
num_neighbors = (distances_fake_to_real <= radius_real.repeat(num_samples, 1)).float().sum(dim=0)
coverage = (num_neighbors > 0).float().mean()
coverage_list.append(coverage.cpu().item())
# Update rtpt
if rtpt:
rtpt.step(
subtitle=f'PRCD Computation step {step} of {num_classes}')
# Compute mean over targets
precision = np.mean(precision_list)
recall = np.mean(recall_list)
density = np.mean(density_list)
coverage = np.mean(coverage_list)
return precision, recall, density, coverage
def compute_embedding(self, dataset, cls=None):
self.inception_model.eval()
        if cls is not None:  # "if cls:" would wrongly skip the subset for class 0
            dataset = SingleClassSubset(dataset, cls)
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=self.batch_size,
shuffle=False,
drop_last=False,
pin_memory=True,
num_workers=self.num_workers)
pred_arr = np.empty((len(dataset), self.dims))
start_idx = 0
max_iter = int(len(dataset) / self.batch_size)
for step, (x, y) in enumerate(dataloader):
with torch.no_grad():
if x.shape[1] != 3:
x = create_image(x, self.generator,
crop_size=self.crop_size, resize=299, batch_size=int(self.batch_size / 2))
x = x.to(self.device)
pred = self.inception_model(x)[0]
pred = pred.squeeze(3).squeeze(2).cpu().numpy()
pred_arr[start_idx:start_idx + pred.shape[0]] = pred
start_idx = start_idx + pred.shape[0]
return torch.from_numpy(pred_arr)
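# Hedged usage sketch (not in the original file): computing PRCD over a real and a fake
# dataset; the datasets, device and generator are placeholders supplied by the
# surrounding evaluation pipeline.
# metric = PRCD(real_dataset, fake_dataset, device=torch.device('cuda'), generator=G)
# precision, recall, density, coverage = metric.compute_metric(num_classes=10, k=3)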
|
import curses
from spotui.src.util import truncate
from spotui.src.menu import Menu
from spotui.src.component import Component
class DeviceMenu(Component):
def __init__(self, stdscr, api, select_device, close):
self.stdscr = stdscr
self.api = api
self.select_device = select_device
self.close = close
self.active = True
self.popup = True
self.title = "Select a Device"
self.interactive = True
self.items = api.get_devices()
self.restart()
def restart(self):
self.items = self.api.get_devices()
scry, scrx = self.stdscr.getmaxyx()
box_height = round(scry / 2)
box_width = round(scrx / 2.5)
self.startx = round((scrx / 2) - (box_width / 2))
self.endx = self.startx + box_width
self.starty = round((scry / 2) - (box_height / 2))
self.endy = self.starty + box_height
self.component = Menu(
self.stdscr,
list(map(self.__map_devices, self.items))
if self.items and len(self.items) > 0 else [],
self.starty,
self.startx,
self.endy,
self.endx,
)
def __map_devices(self, item):
available_space = self.endx - self.startx - 6
item["text"] = truncate(item["text"], available_space)
def handler():
if item and "id" in item:
self.select_device(item["id"])
item["handler"] = handler
return item
def receive_input(self, key):
if ((key == curses.KEY_ENTER or key in [10, 13]) and self.items
and len(self.items) > 0):
self.items[self.component.selected]["handler"]()
self.close()
else:
self.component.receive_input(key)
|
import ast
import sys
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
class Kline:
def __init__(self, kl):
self.open_time = kl[0]
self.open = float(kl[1])
self.high = float(kl[2])
self.low = float(kl[3])
self.close = float(kl[4])
self.volume = float(kl[5])
self.close_time = kl[6]
self.quote_asset_volume = kl[7]
self.number_trades = kl[8]
self.taker_buy_base_asset = kl[9]
self.taker_buy_quote_asset = kl[10]
self.ignore = kl[11]
def __lt__(self, other):
return self.open_time < other.open_time
if __name__ == "__main__":
with open("LSKETH.log", "r") as f:
step = 1
data = f.readlines()
data = data[1:60*24*16:step]
print("Read " + str(len(data)) + " lines")
kl = list(map(lambda k: Kline(ast.literal_eval(k)), data))
# calc max percent change in 24h window
percent_ch = []
for index, v in enumerate(kl):
block = kl[index::24*60]
result = max(map(lambda x: ((x.high-v.low)/v.low)*100, block))
percent_ch.append(result)
plt.figure(1)
plt.plot(list(map(lambda x: (x.close + x.open)/2, kl)))
plt.figure(2)
plt.plot(percent_ch)
plt.figure(3)
        n, bins, patches = plt.hist(percent_ch, 50, density=True, facecolor='green', alpha=0.75)  # 'normed' was removed from Matplotlib; density=True is the equivalent
number_of_entries = sum(bins[6:])*len(percent_ch)
print("Number of entries: " + str(number_of_entries))
plt.xlabel('Profit')
plt.ylabel('Probability')
plt.title('lisk market possible profit')
plt.axis([0, 40, 0, 0.1])
plt.grid(True)
diff = -sys.maxsize-1
for index, val in enumerate(kl):
if index != 0:
v = abs(kl[index-1].open_time - kl[index].open_time)
if v > diff:
diff = v
print(v)
print(diff)
plt.show()
|
var fs = require('fs');
var stemmer = require('../porter').stemmer;
process.chdir(__dirname);
exports['test Porter stemmer'] = function(test){
// check that the sample vocabulary given on
// http://tartarus.org/~martin/PorterStemmer/
// yields the expected output when stemmed.
var vocabulary = fs.readFileSync('./input.txt').toString().trim().split('\n');
var expected = fs.readFileSync('./output.txt').toString().trim().split('\n');
for (var i=0; i<vocabulary.length; i++) {
test.equals(stemmer(vocabulary[i]), expected[i]);
}
test.done();
};
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2018 Fedele Mantuano (https://www.linkedin.com/in/fmantuano/)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
This module is based on Analysis of SMTP dialects
(https://sissden.eu/blog/analysis-of-smtp-dialects)
"""
import datetime
import logging
import re
from operator import itemgetter
try:
from elasticsearch import Elasticsearch
from elasticsearch import ElasticsearchException
except ImportError:
raise ImportError("To use dialects module, you must use Elasticsearch")
try:
from modules.attachments import fingerprints
except ImportError:
from ...modules.attachments import fingerprints
log = logging.getLogger(__name__)
DIALECT_CLIENT_REGX_SORTED = [
(re.compile(r'(?:ehlo|helo)\s*', re.I), 0),
(re.compile(r'mail\s+from\s*:?\s*', re.I), 1),
(re.compile(r'rcpt\s+to\s*:?\s*', re.I), 2),
(re.compile(r'^[\b\s]*data[\b\s]*$', re.I), 3),
(re.compile(r'^[\b\s]*quit[\b\s]*$', re.I), 4),
]
# This query gets the code in postfix index
query_code = """
{
"query": {
"term": {
"message_id.keyword": {
"value": "%(message_id)s"
}
}
}
}
"""
# This query gets the client in postfix index
query_client = """
{
"query": {
"bool": {
"filter": {
"term": {
"tags": "client"
}
},
"must": [
{
"term": {
"code.keyword": {
"value": "%(code)s"
}
}
}
]
}
}
}
"""
# This query gets all communication from client and server
query_dialect = """
{
"query": {
"bool": {
"must": [
{
"range": {
"@timestamp": {
"gte": "%(timestamp)s||-30s",
"lte": "%(timestamp)s||+10s"
}
}
},
{
"term": {
"client_ip.keyword": {
"value": "%(client_ip)s"
}
}
},
{
"term": {
"client_name.keyword": {
"value": "%(client_name)s"
}
}
},
{
"term": {
"tags": {
"value": "dialect"
}
}
}
]
}
},
"sort": [
{
"@timestamp": {
"order": "asc"
}
}
]
}
"""
def get_elastic_indices(index_prefix="postfix-"):
"""
    This function gets an Elasticsearch index prefix
    and returns a comma-separated string of the indices
    for yesterday and today.
The indices must be prefix-YEAR.MONTH.DAY
(postfix-2018.09.07)
Keyword Arguments:
index_prefix {str} -- prefix of Elasticsearch
indices (default: {"postfix-"})
Returns:
        {str} -- comma-separated list of indices
"""
today = datetime.date.today()
yesterday = today - datetime.timedelta(days=1)
days = [today, yesterday]
indices = ",".join(["postfix-{}".format(
i.strftime("%Y.%m.%d")) for i in days])
return indices
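# Illustrative example (hypothetical dates, following the format described in the docstring):
#   get_elastic_indices("postfix-") -> "postfix-2018.09.07,postfix-2018.09.06"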
def get_messages(message_id, elastic_server, index_prefix, max_size=100):
"""
This function returns a list with all SMTP messages between
client and server (dialect)
Arguments:
message_id {string} -- email message-id header
elastic_server {list/string} -- Elasticsearch server
index_prefix {string} -- prefix of Postfix index
Keyword Arguments:
max_size {int} -- Max size of messages to save (default: {100})
Returns:
        list -- List of tuples of SMTP messages. Each tuple has actor and message
"""
try:
# get indices to query
indices = get_elastic_indices(index_prefix)
# connect to Elasticsearch
es = Elasticsearch(hosts=elastic_server)
        # From message_id get the code of the communication between client and server
r = es.search(
index=indices,
body=query_code % {"message_id": message_id},
ignore_unavailable=True)
code = r["hits"]["hits"][0]["_source"]["code"]
timestamp = r["hits"]["hits"][0]["_source"]["@timestamp"]
log.debug("The code of {!r} is {!r}".format(message_id, code))
# From code get client (ip and name)
r = es.search(
index=indices,
body=query_client % {"code": code},
ignore_unavailable=True)
client_ip = r["hits"]["hits"][0]["_source"]["client_ip"]
client_name = r["hits"]["hits"][0]["_source"]["client_name"]
# From client get dialects
r = es.search(
index=indices,
body=query_dialect % {
"timestamp": timestamp,
"client_ip": client_ip,
"client_name": client_name},
size=max_size,
ignore_unavailable=True)
messages = [(i["_source"]["actor"],
i["_source"]["dialect"]) for i in r["hits"]["hits"]]
except ElasticsearchException:
log.exception(
"Failed query Elasticsearch for dialect: {!r}".format(message_id))
except IndexError:
log.debug("message-id {!r} not found".format(message_id))
else:
return messages
def get_messages_str(messages):
"""
    From the messages list, returns a string with the whole
    conversation between client and server
Arguments:
messages {list} -- list of messages from get_messages
Returns:
str -- string of conversation
"""
messages_str = ""
for i in messages:
messages_str += "{}: {}\n".format(i[0], i[1])
return messages_str.strip("\n\t ")
def get_dialect(messages):
"""
    This function extracts only the client parts related
    to the SMTP commands
Arguments:
messages {list} -- list of messages from get_messages
Returns:
list -- List of commands of client
"""
dialect = set()
for j in DIALECT_CLIENT_REGX_SORTED:
for i in messages:
if i[0] == "client":
r = j[0].findall(i[1])
if r:
dialect.add((r[0], j[1]))
else:
dialect = sorted(dialect, key=itemgetter(1))
return [i[0] for i in dialect]
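# Illustrative sketch (hypothetical SMTP exchange, not taken from real traffic):
# given messages such as
#   [("client", "EHLO mail.example.com"), ("server", "250 OK"),
#    ("client", "MAIL FROM:<a@example.com>"), ("client", "DATA"), ("client", "QUIT")]
# get_dialect() returns roughly ["EHLO ", "MAIL FROM:", "DATA", "QUIT"]
# (the exact trailing whitespace depends on what each regex matched).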
def get_dialect_str(dialect):
"""
    From a dialect list, returns a string
Arguments:
dialect {list} -- output of get_dialect
Returns:
str -- string of client commands
"""
return " ".join(dialect)
def get_dialect_fingerprints(dialect):
"""
    Given a dialect list, returns the hashes of its string
version
Arguments:
dialect {list} -- output of get_dialect
Returns:
namedtuple -- fingerprints md5, sha1, sha256, sha512, ssdeep
"""
dialect_str = get_dialect_str(dialect)
return fingerprints(dialect_str)
def make_dialect_report(
message_id,
elastic_server,
index_prefix,
max_size=100
):
messages = get_messages(message_id, elastic_server, index_prefix, max_size)
if messages:
communication = get_messages_str(messages)
dialect = get_dialect(messages)
dialect_str = get_dialect_str(dialect)
report = {
"communication": communication,
"dialect": dialect_str}
report["md5"], report["sha1"], report["sha256"], \
report["sha512"], report["ssdeep"] = get_dialect_fingerprints(
dialect)
return report
|
import logging
import sys
import json
import django
from django.http import HttpResponse
logger = logging.getLogger(__name__)
class BinderException(Exception):
http_code = None
code = None
fields = None
validation_errors = None
def __init__(self):
self.fields = {}
def exception_location(self):
import traceback
# Exception traceback magic.
tb = sys.exc_info()[2]
file, line, method, code = traceback.extract_tb(tb)[-1]
return (file, method, line, code)
def log(self):
loc = '{1}:{2} in {0}'.format(*self.exception_location())
logger.warning('request raised exception {}: {} at {}'.format(self.__class__.__name__, self.data(), loc))
def data(self):
data = dict(self.fields)
data['code'] = self.code
if hasattr(self, 'object') and self.object:
data['object'] = self.object
return data
def response(self, request=None):
data = self.data()
data['debug'] = {'request_id': request.request_id if request else None}
if django.conf.settings.DEBUG:
data['debug']['location'] = '{1}:{2} in {0}'.format(*self.exception_location())
return HttpResponse(json.dumps(data), status=self.http_code, content_type='application/json')
# The Python Exception __str__ can return something unhelpful like "('foo', SomeOb)".
# This can be quite annoying during debug. __repr__ is better so we use that instead.
def __str__(self):
return repr(self)
class BinderInvalidURI(BinderException):
http_code = 418
code = 'InvalidURI'
def __init__(self, path):
super().__init__()
self.fields['path'] = path
self.fields['message'] = 'Undefined URI for this API.'
if not path.endswith('/'):
self.fields['message'] += ' (Hint: did you forget the trailing slash?)'
class BinderRequestError(BinderException):
http_code = 418
code = 'RequestError'
def __init__(self, message):
super().__init__()
self.fields['message'] = message
class BinderReadOnlyFieldError(BinderRequestError):
def __init__(self, model, field):
super().__init__('Read-only field: {{{}.{}}}.'.format(model, field))
class BinderFieldTypeError(BinderRequestError):
def __init__(self, *args):
super().__init__('Type error for field: {{{}}}.'.format('.'.join(args)))
class BinderInvalidField(BinderRequestError):
def __init__(self, model, field):
super().__init__('Invalid field name for {{{}}}: {{{}}}.'.format(model, field))
class BinderMethodNotAllowed(BinderException):
http_code = 405
code = 'MethodNotAllowed'
def __init__(self, methods=None):
super().__init__()
if methods is not None:
self.fields['allowed_methods'] = methods
class BinderNotAuthenticated(BinderException):
http_code = 403
code = 'NotAuthenticated'
class BinderForbidden(BinderException):
http_code = 403
code = 'Forbidden'
def __init__(self, perm, user):
super().__init__()
self.fields['required_permission'] = perm
username_field = getattr(user, 'USERNAME_FIELD', 'username')
self.fields['current_user'] = getattr(user, username_field)
class BinderCSRFFailure(BinderRequestError):
http_code = 403
code = 'CSRFFailure'
class BinderNotFound(BinderException):
http_code = 404
code = 'NotFound'
def __init__(self, resource=None):
super().__init__()
if resource:
self.fields['resource'] = resource
class BinderFileSizeExceeded(BinderException):
http_code = 413
code = 'FileSizeExceeded'
def __init__(self, max_size):
super().__init__()
self.fields['max_size'] = int(max_size * 10**6)
class BinderFileTypeIncorrect(BinderException):
http_code = 400
code = 'FileTypeIncorrect'
def __init__(self, allowed_types):
super().__init__()
self.fields['allowed_types'] = allowed_types
class BinderImageError(BinderException):
http_code = 400
code = 'ImageError'
def __init__(self, message):
super().__init__()
self.fields['message'] = message
class BinderImageSizeExceeded(BinderException):
http_code = 400
code = 'ImageSizeExceeded'
def __init__(self, max_width, max_height):
super().__init__()
self.fields['max_width'] = max_width
self.fields['max_height'] = max_height
class BinderIsDeleted(BinderException):
http_code = 405
code = 'IsDeleted'
class BinderIsNotDeleted(BinderException):
http_code = 405
code = 'IsNotDeleted'
class BinderValidationError(BinderException):
http_code = 400
code = 'ValidationError'
def __init__(self, errors):
super().__init__()
self.errors = errors
def data(self):
data = dict(self.fields)
data['code'] = self.code
data['errors'] = self.errors
return data
def __radd__(self, other):
return self + other
def __add__(self, other):
if other is None:
return self
errors = {}
for model in set(self.errors) | set(other.errors):
if model in self.errors and model in other.errors:
errors[model] = {}
for pk in set(self.errors[model]) | set(other.errors[model]):
if pk in self.errors[model] and pk in other.errors[model]:
errors[model][pk] = {}
for field in set(self.errors[model][pk]) | set(other.errors[model][pk]):
errors[model][pk][field] = (
self.errors[model][pk][field] if field in self.errors[model][pk] else []
) + (
other.errors[model][pk][field] if field in other.errors[model][pk] else []
)
elif pk in self.errors[model]:
errors[model][pk] = self.errors[model][pk]
else:
errors[model][pk] = other.errors[model][pk]
elif model in self.errors:
errors[model] = self.errors[model]
else:
errors[model] = other.errors[model]
return BinderValidationError(errors)
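# Brief usage sketch (hypothetical model/pk/field names): since __add__/__radd__ merge the
# nested error dicts, several validation errors can be combined with sum():
#   e1 = BinderValidationError({'book': {1: {'title': ['too long']}}})
#   e2 = BinderValidationError({'book': {1: {'title': ['required']}, 2: {'isbn': ['invalid']}}})
#   merged = sum([e1, e2], None)
#   # merged.errors == {'book': {1: {'title': ['too long', 'required']}, 2: {'isbn': ['invalid']}}}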
|
import numpy as np
from rh_logger.api import logger
import logging
import rh_logger
from collections import defaultdict
from mb_aligner.common import ransac
class EstimateUsingSimilarTilesMatches(object):
def __init__(self, **kwargs):
self._missing_matches = {}
if kwargs is None:
kwargs = {}
self._avoid_inter_mfov = kwargs.get("avoid_inter_mfov", False)
self._avoid_inter_mfov_2nd_degree = kwargs.get("avoid_inter_mfov_2nd_degree", False)
self._model_index = kwargs.get("model_index", 1) # Rigid
self._min_matches = kwargs.get("min_matches", 3)
self._iterations = kwargs.get("iterations", 1000)
self._max_epsilon = kwargs.get("max_epsilon", 5)
self._min_inlier_ratio = kwargs.get("min_inlier_ratio", 0)
self._min_num_inlier = kwargs.get("min_num_inlier", 3)
self._max_trust = kwargs.get("max_trust", 3)
self._det_delta = kwargs.get("det_delta", 0.99)
self._max_stretch = kwargs.get("max_stretch", 0.99)
self._robust_filter = True if "robust_filter" in kwargs else False
def add_missing_match(self, match):
self._missing_matches.update(match)
def fix_missing_matches(self, cur_matches):
# If there are no missing matches, return an empty map
if len(self._missing_matches) == 0:
return {}
# for each pair of matching tiles (from cur_matches), aggregate all matches to create median match
# (split by intra mfov and inter mfov)
intra_mfov_matches = defaultdict(list)
inter_mfov_matches = defaultdict(list)
for cur_match_k, cur_match_v in cur_matches.items():
# cur_match_k = (tile1_unique_idx, tile2_unique_idx)
# cur_match_v = filtered_matches : [pts1, pts2]
# where: tile_unique_idx = (tile.layer, tile.mfov_index, tile.tile_index)
tile1_layer, tile1_mfov_index, tile1_tile_index = cur_match_k[0]
tile2_layer, tile2_mfov_index, tile2_tile_index = cur_match_k[1]
if tile1_mfov_index == tile2_mfov_index:
intra_mfov_matches[tile1_tile_index, tile2_tile_index].append(cur_match_v)
elif self._avoid_inter_mfov:
continue
elif self._avoid_inter_mfov_2nd_degree:
if tile1_tile_index < 38 or tile2_tile_index < 38:
continue
else:
inter_mfov_matches[tile1_tile_index, tile2_tile_index].append(cur_match_v)
else:
inter_mfov_matches[tile1_tile_index, tile2_tile_index].append(cur_match_v)
intra_mfov_fake_matches = {}
inter_mfov_fake_matches = {}
# Add missing matches
new_matches = {}
for missing_match_k, missing_match_v in self._missing_matches.items():
# missing_match_k = (tile1_unique_idx, tile2_unique_idx)
# missing_match_v = (tile1, tile2)
tile1, tile2 = missing_match_v
# set where to look, depending whether it's intra mfov or inter mfov
if tile1.mfov_index == tile2.mfov_index:
fake_matches_list = intra_mfov_matches[tile1.tile_index, tile2.tile_index]
mfov_fake_matches = intra_mfov_fake_matches
else:
if self._avoid_inter_mfov:
continue
if self._avoid_inter_mfov_2nd_degree:
if tile1.tile_index < 38 or tile2.tile_index < 38:
continue
fake_matches_list = inter_mfov_matches[tile1.tile_index, tile2.tile_index]
mfov_fake_matches = inter_mfov_fake_matches
logger.report_event("Adding fake matches between: {} and {}".format((tile1.mfov_index, tile1.tile_index), (tile2.mfov_index, tile2.tile_index)), log_level=logging.INFO)
if (tile1.tile_index, tile2.tile_index) not in mfov_fake_matches.keys():
# Compute the best
mfov_fake_matches[tile1.tile_index, tile2.tile_index] = self._compute_fake_match(fake_matches_list)[0] # only keep the model
fake_match_model = mfov_fake_matches[tile1.tile_index, tile2.tile_index]
if fake_match_model is None:
continue
bbox1 = tile1.bbox
bbox2 = tile2.bbox
intersection = [max(bbox1[0], bbox2[0]),
min(bbox1[1], bbox2[1]),
max(bbox1[2], bbox2[2]),
min(bbox1[3], bbox2[3])]
intersection_center = np.array([intersection[0] + intersection[1], intersection[2] + intersection[3]]) * 0.5
fake_match_points_global = np.array([
[intersection_center[0] + intersection[0] - 2, intersection_center[1] + intersection[2] - 2],
[intersection_center[0] + intersection[1] + 4, intersection_center[1] + intersection[2] - 4],
[intersection_center[0] + intersection[0] + 2, intersection_center[1] + intersection[3] - 2],
[intersection_center[0] + intersection[1] - 4, intersection_center[1] + intersection[3] - 6]
]) * 0.5
fake_pts_tile1 = fake_match_points_global - np.array([bbox1[0], bbox1[2]])
fake_pts_tile2 = fake_match_model.apply(fake_pts_tile1)
fake_new_matches = np.array([
fake_pts_tile1, fake_pts_tile2
])
new_matches[missing_match_k] = fake_new_matches
return new_matches
def _compute_fake_match(self, fake_matches_list):
# Input: list of matches, each of the form [pts1, pts2]:
# output: a single fake match given by concatenating the lists, and ransacing it once
all_pts1 = []
all_pts2 = []
for pts1, pts2 in fake_matches_list:
all_pts1.extend(pts1)
all_pts2.extend(pts2)
all_matches = np.array([all_pts1, all_pts2])
model, filtered_matches, filtered_matches_mask = ransac.filter_matches(all_matches, all_matches, self._model_index, self._iterations, self._max_epsilon, self._min_inlier_ratio, self._min_num_inlier, self._max_trust, self._det_delta, self._max_stretch, robust_filter=self._robust_filter)
return model, filtered_matches, filtered_matches_mask
def reset(self):
self._missing_matches = {}
|
test = {
'name': 'matchmaker',
'points': 0,
'suites': [
{
'cases': [
{
'code': r"""
sqlite> SELECT * FROM matchmaker ORDER BY color LIMIT 50;
koala|Thinking Out Loud||red
koala|Thinking Out Loud||blue
|Sandstorm|#009abf|black and gold
tiger|Sandstorm|#ff7700|blue
goat|Sandstorm|4|turquoise
dog|Sandstorm|black|green
dog|Sandstorm|black|blue
dog|Sandstorm|black|green
dog|Sandstorm|black|orange
dog|Sandstorm|black|blue
dog|Sandstorm|black|yellow
dog|Sandstorm|black|blue
dog|Sandstorm|black|blue
dog|Sandstorm|black|green
dog|Sandstorm|black|red
panda|Shake It Off|black|black
cat|That Way|black|navy
dog|That Way|black|purple
dog|That Way|black|blue
dog|That Way|black|purple
dog|That Way|black|gold
cat|Hello|black|red
cat|Hello|black|orange
dog|Hello|black|blue
dog|Hello|black|blue
dog|Thinking Out Loud|black|yellow
dog|Thinking Out Loud|black|green
dog|Shake It Off|blue|pink
dog|Shake It Off|blue|blue
dog|Shake It Off|blue|blue
dog|Shake It Off|blue|red
dog|Shake It Off|blue|orange
dog|Shake It Off|blue|blue
dog|Shake It Off|blue|blue
dog|Shake It Off|blue|blue
koala|Thinking Out Loud|blue|
koala|Thinking Out Loud|blue|red
koala|Thinking Out Loud|blue|blue
tiger|That Way|blue|blue
tiger|That Way|blue|green
tiger|That Way|blue|red
tiger|Hello|blue|red
tiger|Hello|blue|blue
dog|That Way|blue|blue
dog|That Way|blue|black
dog|That Way|blue|purple
dog|That Way|blue|blue
dog|That Way|blue|purple
dog|That Way|blue|gold
panda|Hello|blue|blue
""",
'hidden': False,
'locked': False
}
],
'ordered': False,
'scored': True,
'setup': r"""
sqlite> .read lab12.sql
""",
'teardown': '',
'type': 'sqlite'
}
]
}
|
# Item info
item_info_dict = {
18: {
'packagedVolume': 0.35,
'name': 'Plagioclase',
'volume': 0.35
},
19: {
'packagedVolume': 16.0,
'name': 'Spodumain',
'volume': 16.0
},
20: {
'packagedVolume': 1.2,
'name': 'Kernite',
'volume': 1.2
},
21: {
'packagedVolume': 3.0,
'name': 'Hedbergite',
'volume': 3.0
},
22: {
'packagedVolume': 16.0,
'name': 'Arkonor',
'volume': 16.0
},
34: {
'packagedVolume': 0.01,
'name': 'Tritanium',
'volume': 0.01
},
35: {
'packagedVolume': 0.01,
'name': 'Pyerite',
'volume': 0.01
},
36: {
'packagedVolume': 0.01,
'name': 'Mexallon',
'volume': 0.01
},
37: {
'packagedVolume': 0.01,
'name': 'Isogen',
'volume': 0.01
},
38: {
'packagedVolume': 0.01,
'name': 'Nocxium',
'volume': 0.01
},
39: {
'packagedVolume': 0.01,
'name': 'Zydrine',
'volume': 0.01
},
40: {
'packagedVolume': 0.01,
'name': 'Megacyte',
'volume': 0.01
},
621: {
'packagedVolume': 10000.0,
'name': 'Caracal',
'volume': 92000.0
},
622: {
'packagedVolume': 10000.0,
'name': 'Stabber',
'volume': 80000.0
},
623: {
'packagedVolume': 10000.0,
'name': 'Moa',
'volume': 101000.0
},
626: {
'packagedVolume': 10000.0,
'name': 'Vexor',
'volume': 115000.0
},
1223: {
'packagedVolume': 16.0,
'name': 'Bistot',
'volume': 16.0
},
1224: {
'packagedVolume': 0.3,
'name': 'Pyroxeres',
'volume': 0.3
},
1225: {
'packagedVolume': 16.0,
'name': 'Crokite',
'volume': 16.0
},
1226: {
'packagedVolume': 2.0,
'name': 'Jaspet',
'volume': 2.0
},
1227: {
'packagedVolume': 0.6,
'name': 'Omber',
'volume': 0.6
},
1228: {
'packagedVolume': 0.15,
'name': 'Scordite',
'volume': 0.15
},
1229: {
'packagedVolume': 5.0,
'name': 'Gneiss',
'volume': 5.0
},
1230: {
'packagedVolume': 0.1,
'name': 'Veldspar',
'volume': 0.1
},
1231: {
'packagedVolume': 3.0,
'name': 'Hemorphite',
'volume': 3.0
},
1232: {
'packagedVolume': 8.0,
'name': 'Dark Ochre',
'volume': 8.0
},
16227: {
'packagedVolume': 15000.0,
'name': 'Ferox',
'volume': 252000.0
},
16229: {
'packagedVolume': 15000.0,
'name': 'Brutix',
'volume': 270000.0
},
16242: {
'packagedVolume': 5000.0,
'name': 'Thrasher',
'volume': 43000.0
},
17425: {
'packagedVolume': 16.0,
'name': 'Crimson Arkonor',
'volume': 16.0
},
17426: {
'packagedVolume': 16.0,
'name': 'Prime Arkonor',
'volume': 16.0
},
17428: {
'packagedVolume': 16.0,
'name': 'Triclinic Bistot',
'volume': 16.0
},
17429: {
'packagedVolume': 16.0,
'name': 'Monoclinic Bistot',
'volume': 16.0
},
17432: {
'packagedVolume': 16.0,
'name': 'Sharp Crokite',
'volume': 16.0
},
17433: {
'packagedVolume': 16.0,
'name': 'Crystalline Crokite',
'volume': 16.0
},
17436: {
'packagedVolume': 8.0,
'name': 'Onyx Ochre',
'volume': 8.0
},
17437: {
'packagedVolume': 8.0,
'name': 'Obsidian Ochre',
'volume': 8.0
},
17440: {
'packagedVolume': 3.0,
'name': 'Vitric Hedbergite',
'volume': 3.0
},
17441: {
'packagedVolume': 3.0,
'name': 'Glazed Hedbergite',
'volume': 3.0
},
17444: {
'packagedVolume': 3.0,
'name': 'Vivid Hemorphite',
'volume': 3.0
},
17445: {
'packagedVolume': 3.0,
'name': 'Radiant Hemorphite',
'volume': 3.0
},
17448: {
'packagedVolume': 2.0,
'name': 'Pure Jaspet',
'volume': 2.0
},
17449: {
'packagedVolume': 2.0,
'name': 'Pristine Jaspet',
'volume': 2.0
},
17452: {
'packagedVolume': 1.2,
'name': 'Luminous Kernite',
'volume': 1.2
},
17453: {
'packagedVolume': 1.2,
'name': 'Fiery Kernite',
'volume': 1.2
},
17455: {
'packagedVolume': 0.35,
'name': 'Azure Plagioclase',
'volume': 0.35
},
17456: {
'packagedVolume': 0.35,
'name': 'Rich Plagioclase',
'volume': 0.35
},
17459: {
'packagedVolume': 0.3,
'name': 'Solid Pyroxeres',
'volume': 0.3
},
17460: {
'packagedVolume': 0.3,
'name': 'Viscous Pyroxeres',
'volume': 0.3
},
17463: {
'packagedVolume': 0.15,
'name': 'Condensed Scordite',
'volume': 0.15
},
17464: {
'packagedVolume': 0.15,
'name': 'Massive Scordite',
'volume': 0.15
},
17466: {
'packagedVolume': 16.0,
'name': 'Bright Spodumain',
'volume': 16.0
},
17467: {
'packagedVolume': 16.0,
'name': 'Gleaming Spodumain',
'volume': 16.0
},
17470: {
'packagedVolume': 0.1,
'name': 'Concentrated Veldspar',
'volume': 0.1
},
17471: {
'packagedVolume': 0.1,
'name': 'Dense Veldspar',
'volume': 0.1
},
17865: {
'packagedVolume': 5.0,
'name': 'Iridescent Gneiss',
'volume': 5.0
},
17866: {
'packagedVolume': 5.0,
'name': 'Prismatic Gneiss',
'volume': 5.0
},
17867: {
'packagedVolume': 0.6,
'name': 'Silvery Omber',
'volume': 0.6
},
17868: {
'packagedVolume': 0.6,
'name': 'Golden Omber',
'volume': 0.6
},
24696: {
'packagedVolume': 15000.0,
'name': 'Harbinger',
'volume': 234000.0
},
24698: {
'packagedVolume': 15000.0,
'name': 'Drake',
'volume': 252000.0
},
24702: {
'packagedVolume': 15000.0,
'name': 'Hurricane',
'volume': 216000.0
},
28367: {
'packagedVolume': 8.8,
'name': 'Compressed Arkonor',
'volume': 8.8
},
28385: {
'packagedVolume': 8.8,
'name': 'Compressed Crimson Arkonor',
'volume': 8.8
},
28387: {
'packagedVolume': 8.8,
'name': 'Compressed Prime Arkonor',
'volume': 8.8
},
28388: {
'packagedVolume': 4.4,
'name': 'Compressed Bistot',
'volume': 4.4
},
28389: {
'packagedVolume': 4.4,
'name': 'Compressed Monoclinic Bistot',
'volume': 4.4
},
28390: {
'packagedVolume': 4.4,
'name': 'Compressed Triclinic Bistot',
'volume': 4.4
},
28391: {
'packagedVolume': 7.81,
'name': 'Compressed Crokite',
'volume': 7.81
},
28392: {
'packagedVolume': 7.81,
'name': 'Compressed Crystalline Crokite',
'volume': 7.81
},
28393: {
'packagedVolume': 7.81,
'name': 'Compressed Sharp Crokite',
'volume': 7.81
},
28394: {
'packagedVolume': 4.2,
'name': 'Compressed Dark Ochre',
'volume': 4.2
},
28395: {
'packagedVolume': 4.2,
'name': 'Compressed Obsidian Ochre',
'volume': 4.2
},
28396: {
'packagedVolume': 4.2,
'name': 'Compressed Onyx Ochre',
'volume': 4.2
},
28397: {
'packagedVolume': 1.8,
'name': 'Compressed Gneiss',
'volume': 1.8
},
28398: {
'packagedVolume': 1.8,
'name': 'Compressed Iridescent Gneiss',
'volume': 1.8
},
28399: {
'packagedVolume': 1.8,
'name': 'Compressed Prismatic Gneiss',
'volume': 1.8
},
28400: {
'packagedVolume': 0.47,
'name': 'Compressed Glazed Hedbergite',
'volume': 0.47
},
28401: {
'packagedVolume': 0.47,
'name': 'Compressed Hedbergite',
'volume': 0.47
},
28402: {
'packagedVolume': 0.47,
'name': 'Compressed Vitric Hedbergite',
'volume': 0.47
},
28403: {
'packagedVolume': 0.86,
'name': 'Compressed Hemorphite',
'volume': 0.86
},
28404: {
'packagedVolume': 0.86,
'name': 'Compressed Radiant Hemorphite',
'volume': 0.86
},
28405: {
'packagedVolume': 0.86,
'name': 'Compressed Vivid Hemorphite',
'volume': 0.86
},
28406: {
'packagedVolume': 0.15,
'name': 'Compressed Jaspet',
'volume': 0.15
},
28407: {
'packagedVolume': 0.15,
'name': 'Compressed Pristine Jaspet',
'volume': 0.15
},
28408: {
'packagedVolume': 0.15,
'name': 'Compressed Pure Jaspet',
'volume': 0.15
},
28409: {
'packagedVolume': 0.19,
'name': 'Compressed Fiery Kernite',
'volume': 0.19
},
28410: {
'packagedVolume': 0.19,
'name': 'Compressed Kernite',
'volume': 0.19
},
28411: {
'packagedVolume': 0.19,
'name': 'Compressed Luminous Kernite',
'volume': 0.19
},
28415: {
'packagedVolume': 0.3,
'name': 'Compressed Golden Omber',
'volume': 0.3
},
28416: {
'packagedVolume': 0.3,
'name': 'Compressed Omber',
'volume': 0.3
},
28417: {
'packagedVolume': 0.3,
'name': 'Compressed Silvery Omber',
'volume': 0.3
},
28418: {
'packagedVolume': 28.0,
'name': 'Compressed Bright Spodumain',
'volume': 28.0
},
28419: {
'packagedVolume': 28.0,
'name': 'Compressed Gleaming Spodumain',
'volume': 28.0
},
28420: {
'packagedVolume': 28.0,
'name': 'Compressed Spodumain',
'volume': 28.0
},
28421: {
'packagedVolume': 0.15,
'name': 'Compressed Azure Plagioclase',
'volume': 0.15
},
28422: {
'packagedVolume': 0.15,
'name': 'Compressed Plagioclase',
'volume': 0.15
},
28423: {
'packagedVolume': 0.15,
'name': 'Compressed Rich Plagioclase',
'volume': 0.15
},
28424: {
'packagedVolume': 0.16,
'name': 'Compressed Pyroxeres',
'volume': 0.16
},
28425: {
'packagedVolume': 0.16,
'name': 'Compressed Solid Pyroxeres',
'volume': 0.16
},
28426: {
'packagedVolume': 0.16,
'name': 'Compressed Viscous Pyroxeres',
'volume': 0.16
},
28427: {
'packagedVolume': 0.19,
'name': 'Compressed Condensed Scordite',
'volume': 0.19
},
28428: {
'packagedVolume': 0.19,
'name': 'Compressed Massive Scordite',
'volume': 0.19
},
28429: {
'packagedVolume': 0.19,
'name': 'Compressed Scordite',
'volume': 0.19
},
28430: {
'packagedVolume': 0.15,
'name': 'Compressed Concentrated Veldspar',
'volume': 0.15
},
28431: {
'packagedVolume': 0.15,
'name': 'Compressed Dense Veldspar',
'volume': 0.15
},
28432: {
'packagedVolume': 0.15,
'name': 'Compressed Veldspar',
'volume': 0.15
},
32872: {
'packagedVolume': 5000.0,
'name': 'Algos',
'volume': 55000.0
},
32878: {
'packagedVolume': 5000.0,
'name': 'Talwar',
'volume': 43000.0
},
46675: {
'packagedVolume': 8.0,
'name': 'Jet Ochre',
'volume': 8.0
},
46676: {
'packagedVolume': 16.0,
'name': 'Cubic Bistot',
'volume': 16.0
},
46677: {
'packagedVolume': 16.0,
'name': 'Pellucid Crokite',
'volume': 16.0
},
46678: {
'packagedVolume': 16.0,
'name': 'Flawless Arkonor',
'volume': 16.0
},
46679: {
'packagedVolume': 5.0,
'name': 'Brilliant Gneiss',
'volume': 5.0
},
46680: {
'packagedVolume': 3.0,
'name': 'Lustrous Hedbergite',
'volume': 3.0
},
46681: {
'packagedVolume': 3.0,
'name': 'Scintillating Hemorphite',
'volume': 3.0
},
46682: {
'packagedVolume': 2.0,
'name': 'Immaculate Jaspet',
'volume': 2.0
},
46683: {
'packagedVolume': 1.2,
'name': 'Resplendant Kernite',
'volume': 1.2
},
46684: {
'packagedVolume': 0.6,
'name': 'Platinoid Omber',
'volume': 0.6
},
46685: {
'packagedVolume': 0.35,
'name': 'Sparkling Plagioclase',
'volume': 0.35
},
46686: {
'packagedVolume': 0.3,
'name': 'Opulent Pyroxeres',
'volume': 0.3
},
46687: {
'packagedVolume': 0.15,
'name': 'Glossy Scordite',
'volume': 0.15
},
46688: {
'packagedVolume': 16.0,
'name': 'Dazzling Spodumain',
'volume': 16.0
},
46689: {
'packagedVolume': 0.1,
'name': 'Stable Veldspar',
'volume': 0.1
},
46691: {
'packagedVolume': 8.8,
'name': 'Compressed Flawless Arkonor',
'volume': 8.8
},
46692: {
'packagedVolume': 4.4,
'name': 'Compressed Cubic Bistot',
'volume': 4.4
},
46693: {
'packagedVolume': 7.81,
'name': 'Compressed Pellucid Crokite',
'volume': 7.81
},
46694: {
'packagedVolume': 4.2,
'name': 'Compressed Jet Ochre',
'volume': 4.2
},
46695: {
'packagedVolume': 1.8,
'name': 'Compressed Brilliant Gneiss',
'volume': 1.8
},
46696: {
'packagedVolume': 0.47,
'name': 'Compressed Lustrous Hedbergite',
'volume': 0.47
},
46697: {
'packagedVolume': 0.86,
'name': 'Compressed Scintillating Hemorphite',
'volume': 0.86
},
46698: {
'packagedVolume': 0.15,
'name': 'Compressed Immaculate Jaspet',
'volume': 0.15
},
46699: {
'packagedVolume': 0.19,
'name': 'Compressed Resplendant Kernite',
'volume': 0.19
},
46700: {
'packagedVolume': 0.3,
'name': 'Compressed Platinoid Omber',
'volume': 0.3
},
46701: {
'packagedVolume': 0.15,
'name': 'Compressed Sparkling Plagioclase',
'volume': 0.15
},
46702: {
'packagedVolume': 0.16,
'name': 'Compressed Opulent Pyroxeres',
'volume': 0.16
},
46703: {
'packagedVolume': 0.19,
'name': 'Compressed Glossy Scordite',
'volume': 0.19
},
46704: {
'packagedVolume': 28.0,
'name': 'Compressed Dazzling Spodumain',
'volume': 28.0
},
46705: {
'packagedVolume': 0.15,
'name': 'Compressed Stable Veldspar',
'volume': 0.15
},
52306: {
'packagedVolume': 16.0,
'name': 'Talassonite',
'volume': 16.0
},
52315: {
'packagedVolume': 16.0,
'name': 'Rakovene',
'volume': 16.0
},
52316: {
'packagedVolume': 16.0,
'name': 'Bezdnacine',
'volume': 16.0
},
56625: {
'packagedVolume': 16.0,
'name': 'Abyssal Talassonite',
'volume': 16.0
},
56626: {
'packagedVolume': 16.0,
'name': 'Hadal Talassonite',
'volume': 16.0
},
56627: {
'packagedVolume': 16.0,
'name': 'Abyssal Bezdnacine',
'volume': 16.0
},
56628: {
'packagedVolume': 16.0,
'name': 'Hadal Bezdnacine',
'volume': 16.0
},
56629: {
'packagedVolume': 16.0,
'name': 'Abyssal Rakovene',
'volume': 16.0
},
56630: {
'packagedVolume': 16.0,
'name': 'Hadal Rakovene',
'volume': 16.0
}
}
# Example prices for items.
item_prices_dict = {
18: 70.0,
19: 10030.0,
20: 950.0,
21: 1107.11,
22: 2503.0,
34: 6.32,
35: 17.5,
36: 137.7,
37: 36.5,
38: 1159.0,
39: 1335.0,
40: 627.8,
621: 17150000.0,
622: 17400000.0,
623: 16260000.0,
626: 18000000.0,
1223: 3000.0,
1224: 44.2,
1225: 8301.0,
1226: 676.0,
1227: 328.8,
1228: 21.51,
1229: 1925.0,
1230: 17.98,
1231: 802.1,
1232: 4211.0,
16227: 53726000.0,
16229: 84732400.0,
16242: 1595000.0,
17425: 3376.0,
17426: 4157.0,
17428: 3100.0,
17429: 5500.0,
17432: 7700.0,
17433: 7900.0,
17436: 4100.0,
17437: 4125.0,
17440: 1000.0,
17441: 832.2,
17444: 827.0,
17445: 872.0,
17448: 849.0,
17449: 308.2,
17452: 39.0,
17453: 120.1,
17455: 71.01,
17456: 75.0,
17459: 45.83,
17460: 47.51,
17463: 21.76,
17464: 23.33,
17466: 5024.0,
17467: 7540.0,
17470: 18.0,
17471: 19.63,
17865: 1500.0,
17866: 1972.0,
17867: 124.7,
17868: 79.21,
24696: 82242000.0,
24698: 86692323.23,
24702: 68796315.79,
28367: 368500.0,
28385: 255300.0,
28387: 350000.0,
28388: 331800.0,
28389: 346900.0,
28390: 318300.0,
28391: 953400.0,
28392: 1050000.0,
28393: 990000.0,
28394: 599900.0,
28395: 600000.0,
28396: 548919.15,
28397: 330700.0,
28398: 299400.0,
28399: 277800.0,
28400: 169600.0,
28401: 249900.0,
28402: 200000.0,
28403: 198900.0,
28404: 95800.0,
28405: 113040.0,
28406: 109400.0,
28407: 86920.0,
28408: 85100.0,
28409: 17440.0,
28410: 26500.0,
28411: 14470.0,
28415: 18960.0,
28416: 15180.0,
28417: 14000.0,
28418: 995500.0,
28419: 1055927.27,
28420: 1217000.0,
28421: 8000.0,
28422: 7890.0,
28423: 8701.0,
28424: 5345.0,
28425: 5326.0,
28426: 5437.0,
28427: 2487.0,
28428: 2680.0,
28429: 2367.0,
28430: 1949.0,
28431: 2125.0,
28432: 1902.0,
32872: 1711000.0,
32878: 1600000.0,
46675: 4003.0,
46676: 4053.0,
46677: 9954.55,
46678: 2242.0,
46679: 2102.0,
46680: 900.0,
46681: 891.1,
46682: 501.3,
46683: 1.01,
46684: 15.0,
46685: 83.15,
46686: 54.07,
46687: 4.21,
46688: 5420.0,
46689: 17.0,
46691: 419900.0,
46692: 411200.0,
46693: 1090000.0,
46694: 651200.0,
46695: 378193.55,
46696: 183500.0,
46697: 123600.0,
46698: 118700.0,
46699: 32800.0,
46700: 17550.0,
46701: 9900.0,
46702: 9111.0,
46703: 3421.0,
46704: 1254000.0,
46705: 6500.0,
52306: 9032.0,
52315: 5688.0,
52316: 10170.0,
56625: 8000.0,
56626: 13550.0,
56627: 1803.0,
56628: 10000.0,
56629: 29000.0,
56630: 20010.0
}
# Mineral types (and quantities) that each ore yields.
# Only include ores that *only* yield the minerals we want.
ore_yield_dict = {
18: {
34: 175,
36: 70
},
19: {
34: 48000,
37: 1000,
38: 160,
39: 80,
40: 40
},
20: {
36: 60,
37: 120
},
21: {
35: 450,
38: 120
},
22: {
35: 3200,
36: 1200,
40: 120
},
1223: {
35: 3200,
36: 1200,
39: 160
},
1224: {
35: 90,
36: 30
},
1225: {
35: 800,
36: 2000,
38: 800
},
1226: {
36: 150,
38: 50
},
1227: {
35: 90,
37: 75
},
1228: {
34: 150,
35: 90
},
1229: {
35: 2000,
36: 1500,
37: 800
},
1230: {
34: 400
},
1231: {
37: 240,
38: 90
},
1232: {
36: 1360,
37: 1200,
38: 320
},
17425: {
35: 3360,
36: 1260,
40: 126
},
17426: {
35: 3520,
36: 1320,
40: 132
},
17428: {
35: 3360,
36: 1260,
39: 168
},
17429: {
35: 3520,
36: 1320,
39: 176
},
17432: {
35: 840,
36: 2100,
38: 840
},
17433: {
35: 880,
36: 2200,
38: 880
},
17436: {
36: 1428,
37: 1260,
38: 336
},
17437: {
36: 1496,
37: 1320,
38: 352
},
17440: {
35: 473,
38: 126
},
17441: {
35: 495,
38: 132
},
17444: {
37: 252,
38: 95
},
17445: {
37: 264,
38: 99
},
17448: {
36: 158,
38: 53
},
17449: {
36: 165,
38: 55
},
17452: {
36: 63,
37: 126
},
17453: {
36: 66,
37: 132
},
17455: {
34: 184,
36: 74
},
17456: {
34: 193,
36: 77
},
17459: {
35: 95,
36: 32
},
17460: {
35: 99,
36: 33
},
17463: {
34: 158,
35: 95
},
17464: {
34: 165,
35: 99
},
17466: {
34: 50400,
37: 1050,
38: 168,
39: 84,
40: 42
},
17467: {
34: 52800,
37: 1100,
38: 176,
39: 88,
40: 44
},
17470: {
34: 420
},
17471: {
34: 440
},
17865: {
35: 2100,
36: 1575,
37: 840
},
17866: {
35: 2200,
36: 1650,
37: 880
},
17867: {
35: 95,
37: 79
},
17868: {
35: 99,
37: 83
},
28367: {
35: 3200,
36: 1200,
40: 120
},
28385: {
35: 3360,
36: 1260,
40: 126
},
28387: {
35: 3520,
36: 1320,
40: 132
},
28388: {
35: 3200,
36: 1200,
39: 160
},
28389: {
35: 3520,
36: 1320,
39: 176
},
28390: {
35: 3360,
36: 1260,
39: 168
},
28391: {
35: 800,
36: 2000,
38: 800
},
28392: {
35: 880,
36: 2200,
38: 880
},
28393: {
35: 840,
36: 2100,
38: 840
},
28394: {
36: 1360,
37: 1200,
38: 320
},
28395: {
36: 1496,
37: 1320,
38: 352
},
28396: {
36: 1428,
37: 1260,
38: 336
},
28397: {
35: 2000,
36: 1500,
37: 800
},
28398: {
35: 2100,
36: 1575,
37: 840
},
28399: {
35: 2200,
36: 1650,
37: 880
},
28400: {
35: 495,
38: 132
},
28401: {
35: 450,
38: 120
},
28402: {
35: 473,
38: 126
},
28403: {
37: 240,
38: 90
},
28404: {
37: 264,
38: 99
},
28405: {
37: 252,
38: 95
},
28406: {
36: 150,
38: 50
},
28407: {
36: 165,
38: 55
},
28408: {
36: 158,
38: 53
},
28409: {
36: 66,
37: 132
},
28410: {
36: 60,
37: 120
},
28411: {
36: 63,
37: 126
},
28415: {
35: 99,
37: 83
},
28416: {
35: 90,
37: 75
},
28417: {
35: 95,
37: 79
},
28418: {
34: 50400,
37: 1050,
38: 168,
39: 84,
40: 42
},
28419: {
34: 52800,
37: 1100,
38: 176,
39: 88,
40: 44
},
28420: {
34: 48000,
37: 1000,
38: 160,
39: 80,
40: 40
},
28421: {
34: 184,
36: 74
},
28422: {
34: 175,
36: 70
},
28423: {
34: 193,
36: 77
},
28424: {
35: 90,
36: 30
},
28425: {
35: 95,
36: 32
},
28426: {
35: 99,
36: 33
},
28427: {
34: 158,
35: 95
},
28428: {
34: 165,
35: 99
},
28429: {
34: 150,
35: 90
},
28430: {
34: 420
},
28431: {
34: 440
},
28432: {
34: 400
},
46675: {
36: 1564,
37: 1380,
38: 368
},
46676: {
35: 3680,
36: 1380,
39: 184
},
46677: {
35: 920,
36: 2300,
38: 920
},
46678: {
35: 3680,
36: 1380,
40: 138
},
46679: {
35: 2300,
36: 1725,
37: 920
},
46680: {
35: 518,
38: 138
},
46681: {
37: 276,
38: 104
},
46682: {
36: 173,
38: 58
},
46683: {
36: 69,
37: 138
},
46684: {
35: 104,
37: 86
},
46685: {
34: 201,
36: 81
},
46686: {
35: 104,
36: 35
},
46687: {
34: 173,
35: 104
},
46688: {
34: 55200,
37: 1150,
38: 184,
39: 92,
40: 46
},
46689: {
34: 460
},
46691: {
35: 3680,
36: 1380,
40: 138
},
46692: {
35: 3680,
36: 1380,
39: 184
},
46693: {
35: 920,
36: 2300,
38: 920
},
46694: {
36: 1564,
37: 1380,
38: 368
},
46695: {
35: 2300,
36: 1725,
37: 920
},
46696: {
35: 518,
38: 138
},
46697: {
37: 276,
38: 104
},
46698: {
36: 173,
38: 58
},
46699: {
36: 69,
37: 138
},
46700: {
35: 104,
37: 86
},
46701: {
34: 201,
36: 81
},
46702: {
35: 104,
36: 35
},
46703: {
34: 173,
35: 104
},
46704: {
34: 55200,
37: 1150,
38: 184,
39: 92,
40: 46
},
46705: {
34: 460
},
52306: {
34: 40000,
38: 960,
40: 32
},
52315: {
34: 40000,
37: 3200,
39: 200
},
52316: {
34: 40000,
37: 4800,
40: 128
},
56625: {
34: 42000,
38: 1008,
40: 34
},
56626: {
34: 44000,
38: 1056,
40: 35
},
56627: {
34: 42000,
37: 5040,
40: 134
},
56628: {
34: 44000,
37: 5280,
40: 141
},
56629: {
34: 42000,
37: 3360,
39: 210
},
56630: {
34: 44000,
37: 3520,
39: 220
}
}
ship_requirements_dict = {
621: {
34: 544444,
35: 122222,
36: 36667,
37: 10444,
38: 3333,
39: 1400,
40: 556
},
622: {
34: 511111,
35: 188889,
36: 40000,
37: 10333,
38: 2778,
39: 1244,
40: 244
},
623: {
34: 611111,
35: 155556,
36: 40000,
37: 9889,
38: 2667,
39: 1266,
40: 388
},
626: {
34: 622222,
35: 133333,
36: 41111,
37: 10111,
38: 2889,
39: 1312,
40: 356
},
16227: {
34: 2555556,
35: 666667,
36: 194444,
37: 22222,
38: 17222,
39: 8444,
40: 1778
},
16229: {
34: 3111111,
35: 700000,
36: 244444,
38: 12778,
39: 4888,
40: 2888
},
16242: {
34: 47907,
35: 11483,
36: 3977,
37: 1757,
39: 180,
40: 26
},
24696: {
34: 3888889,
35: 833333,
36: 205556,
37: 33333,
39: 6666,
40: 2666
},
24698: {
34: 2777778,
35: 680078,
36: 188889,
37: 20000,
38: 16667,
39: 8006,
40: 1888
},
24702: {
34: 3000000,
35: 755556,
36: 211111,
37: 55556,
39: 7112,
40: 2888
},
32872: {
34: 63000,
35: 17100,
36: 8100,
37: 900,
38: 210,
39: 60,
40: 6
},
32878: {
34: 61500,
35: 16800,
36: 6000,
37: 960,
38: 360,
39: 92,
40: 6
}
}
# How many ships of each type to build
ship_build_dict = {16242: 10, 626: 1}
# ME of the BP for the ship types we are building
ship_me_dict = {16242: 10, 626: 10}
|
from application.model.entity.usuario import Usuario
class Leitor(Usuario):
    def __init__(self, autor_seguido=None, publicacao_curtida=None):
        # Avoid mutable default arguments: a shared list default would leak state between instances.
        self._autor_seguido = autor_seguido if autor_seguido is not None else []
        self._publicacao_curtida = publicacao_curtida if publicacao_curtida is not None else []
def get_autor_seguido(self):
return self._autor_seguido
def get_publicacao_curtida(self):
return self._publicacao_curtida
def set_autor_curtido(self, novo_autor):
self._autor_seguido.append(novo_autor)
def set_publicacao_curtida(self, nova_curtida):
self._publicacao_curtida.append(nova_curtida)
|
import itertools
import sys
##### Reading data from a text file
path_to_file = sys.argv[1]
with open(path_to_file) as f:
array = []
for line in f:
line = line.split()
if line:
line = [str(i) for i in line]
array.append(line)
array = list(itertools.chain(*array))
#####
##### Quicksort subroutine
def quicksort(array):
length = len(array)
if length < 2:
return array
pivot = array[0]
i = 1
for j in range(1,length):
if array[j] < pivot:
temp = array[j]
array[j] = array[i]
array[i] = temp
i = i + 1
temp = array[i-1]
array[i-1] = pivot
array[0] = temp
return quicksort(array[0:i-1]) + [pivot] + quicksort(array[i:])
#####
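# Illustrative example of the subroutine above (not part of the original script):
#   quicksort(['pear', 'apple', 'banana', 'apple']) -> ['apple', 'apple', 'banana', 'pear']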
array = [val.lower() for val in array] # Convert to Lower Case
array = quicksort(array)
##### Removing duplicates from the output and printing the output
final_output = []
for word in array:
if word.lower() not in final_output:
final_output.append(word.lower())
sys.stdout.write(','.join(final_output))
#####
|
import unittest
from unittest import TestCase
import numpy as np
from escnn.gspaces import *
from escnn.nn import *
from escnn.nn.modules.basismanager import BlocksBasisSampler
import torch
class TestBasisSampler(TestCase):
def test_conv2d(self):
gspaces = [
rot2dOnR2(4),
flipRot2dOnR2(4),
flipRot2dOnR2(-1),
]
for gspace in gspaces:
reprs = gspace.irreps
try:
reg = gspace.regular_repr
reprs = [reg] + reprs
except ValueError:
pass
for i in range(len(reprs) - 1):
for j in range(len(reprs) - 1):
t1 = reprs[:i + 1]
t2 = reprs[:j + 1]
t1 = FieldType(gspace, t1)
t2 = FieldType(gspace, t2)
sigma = None
fco = 2.
layer = R2PointConv(t1, t2,
sigma=sigma,
width=2.,
n_rings=3,
frequencies_cutoff=fco,
bias=False)
self.compare(layer.basissampler, d=2)
def test_conv3d(self):
gspaces = [
flipRot3dOnR3(),
rot3dOnR3(),
# # fullIcoOnR3(),
# icoOnR3(),
octaOnR3(),
dihedralOnR3(),
rot2dOnR3(),
conicalOnR3(),
# fullCylindricalOnR3(),
# cylindricalOnR3(),
mirOnR3(),
invOnR3(),
trivialOnR3(),
]
for gspace in gspaces:
reprs = gspace.irreps[:4]
try:
reg = gspace.regular_repr
reprs = [reg] + reprs
except ValueError:
pass
for i in range(len(reprs) - 1):
for j in range(len(reprs) - 1):
print(gspace, len(reprs))
t1 = reprs[:i + 1]
t2 = reprs[:j + 1]
t1 = FieldType(gspace, t1)
t2 = FieldType(gspace, t2)
sigma = None
fco = 2.
layer = R3PointConv(t1, t2,
sigma=sigma,
width=2.,
n_rings=3,
frequencies_cutoff=fco,
bias=False)
self.compare(layer.basissampler, d=3)
def test_many_block_discontinuous(self):
gspace = rot3dOnR3()
t1 = FieldType(gspace, list(gspace.representations.values()) * 4)
t2 = FieldType(gspace, list(gspace.representations.values()) * 4)
sigma = None
fco = 2.
layer = R3PointConv(t1, t2,
sigma=sigma,
width=2.,
n_rings=3,
frequencies_cutoff=fco,
bias=False)
self.compare(layer.basissampler, d=3)
def test_many_block_sorted(self):
gspace = rot3dOnR3()
t1 = FieldType(gspace, list(gspace.representations.values()) * 4).sorted()
t2 = FieldType(gspace, list(gspace.representations.values()) * 4).sorted()
sigma = None
fco = 2.
layer = R3PointConv(t1, t2,
sigma=sigma,
width=2.,
n_rings=3,
frequencies_cutoff=fco,
bias=False)
self.compare(layer.basissampler, d=3)
def compare(self, basis: BlocksBasisSampler, d: int):
for i, attr1 in enumerate(basis.get_basis_info()):
attr2 = basis.get_element_info(i)
            self.assertEqual(attr1, attr2)
            self.assertEqual(attr1['id'], i)
for _ in range(5):
P = 20
pos = torch.randn(P, d)
x = torch.randn(P, basis._input_size)
distance = torch.norm(pos.unsqueeze(1) - pos, dim=2, keepdim=False)
thr = sorted(distance.view(-1).tolist())[
int(P ** 2 // 16)
]
edge_index = torch.nonzero(distance < thr).T.contiguous()
row, cols = edge_index
edge_delta = pos[row] - pos[cols]
x_j = x[cols]
w = torch.randn(basis.dimension())
f1 = basis(w, edge_delta)
f2 = basis(w, edge_delta)
self.assertTrue(torch.allclose(f1, f2))
y1 = basis.compute_messages(w, x_j, edge_delta, conv_first=False)
y2 = basis.compute_messages(w, x_j, edge_delta, conv_first=True)
np.set_printoptions(precision=7, suppress=True, linewidth=100000000000, threshold=10000000)
self.assertTrue(
torch.allclose(y1, y2, atol=1e-5, rtol=1e-5),
f"Error: outputs do not match!\n"
f"\t{basis._in_reprs}\n"
f"\t{basis._out_reprs}\n"
"Max Abs Error\n"
f"{torch.max(torch.abs(y1-y2)).item()}\n"
)
if __name__ == '__main__':
unittest.main()
|
# main_B
# set configuration only for root Logger
import logging, logging.config
from package1.module1_1 import func1_1_1
from package1.package1a.module1a_1 import func1a_1_1
from package2.module2_1 import func2_1
MODE = "set_file_filter"
if __name__=="__main__":
if MODE == "unexpected_log":
        # [Not recommended] ← the style used in module0
        # you get the following output, though no handler is added.
        # Logs from modules that were handed a NullHandler are not shown.
# >>> module0 : without_handler
from package0.module0 import func0
func0()
elif MODE == "set_root":
        # [Recommended] Fine for a small project or a quick check
        # Logs at INFO level and above from every module are shown
logging.basicConfig(level=logging.INFO
, format = '%(asctime)s\t%(name)-12s\t%(funcName)s\t%(levelname)-8s\t%(message)s'
, handlers = [logging.StreamHandler()])
elif MODE == "set_for":
        # [Not recommended] This could itself be turned into a module, but it is a bit of a stretch
        # The configuration is written out by hand in code for each module
        # Logs at INFO level and above from every module are shown
for package in ["package1.module1_1","package2"]:
logger = logging.getLogger(package)
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s\t%(name)-12s\t%(funcName)s\t%(levelname)-8s\t%(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
elif MODE == "set_file":
        # [Recommended]
        # Collect the configuration in a YAML file
import yaml
with open("log2/logconfig.yaml", 'r') as yml:
yaml_dic = yaml.safe_load(yml)
logging.config.dictConfig(yaml_dic)
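        # Minimal sketch of what log2/logconfig.yaml might contain (hypothetical content,
        # shown here only to illustrate the dictConfig schema):
        #   version: 1
        #   formatters:
        #     default:
        #       format: "%(asctime)s\t%(name)-12s\t%(funcName)s\t%(levelname)-8s\t%(message)s"
        #   handlers:
        #     console:
        #       class: logging.StreamHandler
        #       formatter: default
        #   loggers:
        #     package1:
        #       level: INFO
        #       handlers: [console]
        #     package2:
        #       level: INFO
        #       handlers: [console]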
elif MODE == "set_file_filter":
        # [Recommended?]
        # Using a Filter requires a class definition, so that is packaged into a module together with the config loading
from logconfig import load_logconfig_dic # own module
conf_dic = load_logconfig_dic("log2/logconfig2.yaml", filtering=True)
logging.config.dictConfig(conf_dic)
func1_1_1()
func1a_1_1()
func2_1()
|
from rest_framework import permissions
class IsOfficial(permissions.BasePermission):
"""
Custom permission to allow only official users to pass.
"""
def has_object_permission(self, request, view, obj):
return request.user.is_official
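# Brief usage sketch (hypothetical view name): attach the permission to a DRF view, e.g.
#   class OfficialOnlyViewSet(viewsets.ModelViewSet):
#       permission_classes = [permissions.IsAuthenticated, IsOfficial]
# Note that has_object_permission is only consulted for object-level checks.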
|
from common.python.simulations import BlockSimulation, properties_from_ini
from collections import deque
# max queue size
MAX_QUEUE = 1023
# min FPGA deadtime between queued pulses
MIN_QUEUE_DELTA = 4
# time taken to clear queue
QUEUE_CLEAR_TIME = 4
NAMES, PROPERTIES = properties_from_ini(__file__, "pulse.block.ini")
class PulseSimulation(BlockSimulation):
ENABLE, TRIG, DELAY_L, DELAY_H, WIDTH_L, WIDTH_H, TRIG_EDGE, OUT, QUEUED, \
DROPPED = PROPERTIES
def __init__(self):
self.queue = deque()
self.valid_ts = 0
self.trigtime = 0
self.enqueue = 0
self.dequeue = 0
self.delaypulse = 0
self.delayqueue = 1
self.doqueue = 0
self.missedsignal = 0
self.width = 0
self.delay = 0
def do_pulse(self, ts, changes):
"""We've received a bit event on INP, so queue some output values
based on DELAY and WIDTH"""
# If the queue isn't valid at the moment then error
# If there isn't room for 2 on the queue then error
# If WIDTH is zero DELAY should be >3, or if DELAY is zero WIDTH
# should be >3 for the FIFO to iterate fully
width = self.width
delay = self.delay
if ts < self.valid_ts or len(self.queue) + 2 > MAX_QUEUE:
self.DROPPED += 1
# If there is no specified width then use the width of input pulse
elif width == 0:
self.queue.append((ts + delay, self.TRIG))
elif self.TRIG and self.TRIG_EDGE == 0:
self.generate_queue(ts, delay, width)
elif not self.TRIG and self.TRIG_EDGE == 1 and delay == 0:
self.generate_queue(ts+1, delay, width)
elif not self.TRIG and self.TRIG_EDGE == 1 and delay >= 0:
self.generate_queue(ts, delay, width)
elif self.TRIG and self.TRIG_EDGE == 2:
self.generate_queue(ts, delay, width)
elif not self.TRIG and self.TRIG_EDGE == 2:
self.generate_queue(ts, delay+1, width)
def generate_queue(self, ts, delay, width):
# generate both high and low queue from inp
start = ts + delay
# make sure that start is after any pulse on queue
if self.queue and start < self.queue[-1][0] + MIN_QUEUE_DELTA:
self.DROPPED += 1
self.missedsignal += 1
else:
self.queue.append((start, 1))
self.queue.append((start + width, 0))
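        # Illustrative note, derived from the two appends above: a trigger accepted at ts=10
        # with delay=4 and width=6 queues (14, 1) and (20, 0), i.e. OUT rises at ts 14 and
        # falls at ts 20.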
def do_reset(self):
"""Reset the block, called on rising edge of ENABLE"""
self.DROPPED = 0
def do_clear_queue(self, ts):
"""Clear the queue, but not any errors"""
self.valid_ts = ts + QUEUE_CLEAR_TIME
self.OUT = 0
self.queue.clear()
def on_changes(self, ts, changes):
"""Handle changes at a particular timestamp, then return the timestamp
when we next need to be called"""
# This is a ConfigBlock object for use to get our strings from
super(PulseSimulation, self).on_changes(ts, changes)
# This is the next time we need to be called
next_ts = ts+1
# If the DELAY and WIDTH inputs are out of bounds, set them to 4
if 0 < self.DELAY_L < 4:
self.delay = 4
else:
self.delay = self.DELAY_L
if (0 < self.WIDTH_L < 4) and self.DELAY_L == 0:
self.width = 4
else:
self.width = self.WIDTH_L
# Append queue if the start of the queue is delayed
if self.delaypulse == 1:
if self.WIDTH_L > 0 or self.doqueue == 1:
self.QUEUED += 1
self.delaypulse = 0
self.doqueue = 0
elif changes.get(NAMES.TRIG, None) == 0:
self.doqueue = 1
# Increment the queue
if self.enqueue == 1 and ts == self.trigtime+1:
if self.missedsignal > 0:
self.missedsignal -= 1
else:
self.QUEUED += 1
# Is a pulse of zero required before next pulse?
if self.DELAY_L > 0:
self.delaypulse = 1
self.enqueue = 0
# On the trigger edge set the writestrobe to the queue
# If both DELAY and WIDTH are equal to 0, the module bypasses the queue
if self.width == 0 and self.delay == 0:
self.enqueue = 0
elif changes.get(NAMES.TRIG) == 1 and self.TRIG_EDGE in (0, 2):
# Positive edge
self.trigtime = ts
self.enqueue = 1
elif changes.get(NAMES.TRIG) == 0 and self.TRIG_EDGE in (1, 2):
# Negative edge
self.trigtime = ts + 1
self.enqueue = 1
# Set attributes, and flag clear queue
for name, value in changes.items():
setattr(self, name, value)
if name in ("DELAY_L", "DELAY_L", "WIDTH_L", "WIDTH_L"):
self.do_clear_queue(ts)
# On rising edge of enable clear errors
if changes.get(NAMES.ENABLE, None) == 1:
self.do_reset()
# on falling edge of enable reset output and queue
elif changes.get(NAMES.ENABLE, None) == 0:
self.do_clear_queue(ts)
# If we got an input and we were enabled then output a pulse
if NAMES.TRIG in changes and self.ENABLE:
self.do_pulse(ts, changes)
# if we have anything else on the queue return when it's due
if self.queue:
# next_ts = self.queue[0][0]
# if the pulse on our queue is ready to be produced then produce
if self.queue[0][0] == ts:
if self.queue.popleft()[1] == 1:
self.OUT = 1
self.dequeue = 1
else:
self.OUT = 0
assert next_ts >= ts, "Going back in time %s >= %s" % (next_ts, ts)
# At the end of the pulse, the queue count has decreased
if self.OUT == 0 and self.dequeue == 1:
if self.QUEUED > 0:
self.QUEUED -= 1
self.dequeue = 0
self.delayqueue = 1
# Decrease the queue count for the zero pulse
if self.OUT == 1 and self.delayqueue == 1:
if self.QUEUED > 0:
self.QUEUED -= 1
self.delayqueue = 0
return next_ts
|
from PyQt5 import QtWidgets as QtGui  # the widget classes used below live in QtWidgets in PyQt5
class SLOTEditDialog(QtGui.QWidget):
    def __init__(self, model, encodingModel, parent=None):
super(SLOTEditDialog, self).__init__(parent)
self.model = model
self.encodingModel = encodingModel
# Set up the widgets.
nameLabel = QtGui.QLabel("Name")
nameEdit = QtGui.QLineEdit()
symbolLabel = QtGui.QLabel("Symbol")
symbolEdit = QtGui.QLineEdit()
symbolLabel.setBuddy(symbolEdit)
numeratorLabel = QtGui.QLabel("Numerator")
numeratorEdit = QtGui.QLineEdit()
numeratorLabel.setBuddy(numeratorEdit)
denominatorLabel = QtGui.QLabel("Denominator")
denominatorEdit = QtGui.QLineEdit()
denominatorLabel.setBuddy(denominatorEdit)
offsetLabel = QtGui.QLabel("Offset")
offsetEdit = QtGui.QLineEdit()
offsetLabel.setBuddy(offsetEdit)
encodingLabel = QtGui.QLabel("Encoding")
encodingComboBox = QtGui.QComboBox()
encodingLabel.setBuddy(encodingComboBox)
self.nextButton = QtGui.QPushButton("&Next")
self.previousButton = QtGui.QPushButton("&Previous")
encodingComboBox.setModel(self.encodingModel)
# Set up the mapper.
self.mapper = QtGui.QDataWidgetMapper(self)
self.mapper.setModel(self.model)
self.mapper.addMapping(nameEdit, 0)
self.mapper.addMapping(symbolEdit, 1)
self.mapper.addMapping(numeratorEdit,2)
self.mapper.addMapping(denominatorEdit,3)
self.mapper.addMapping(offsetEdit,4)
self.mapper.addMapping(encodingComboBox, 6)
# Set up connections and layouts.
self.previousButton.clicked.connect(self.mapper.toPrevious)
self.nextButton.clicked.connect(self.mapper.toNext)
self.mapper.currentIndexChanged.connect(self.updateButtons)
layout = QtGui.QGridLayout()
layout.addWidget(nameLabel, 0, 0, 1, 1)
layout.addWidget(nameEdit, 0, 1, 1, 1)
layout.addWidget(self.previousButton, 0, 2, 1, 1)
layout.addWidget(symbolLabel, 1, 0, 1, 1)
layout.addWidget(symbolEdit, 1, 1, 2, 1)
layout.addWidget(self.nextButton, 1, 2, 1, 1)
layout.addWidget(encodingLabel, 3, 0, 1, 1)
layout.addWidget(encodingComboBox, 3, 1, 1, 1)
self.setLayout(layout)
self.mapper.toFirst()
def updateButtons(self, row):
self.previousButton.setEnabled(row > 0)
self.nextButton.setEnabled(row < self.model.rowCount() - 1)
|
import os
import time
import shutil
import pathlib
from socketer import MASM
import threading
from facer import Facer
masmPath = None
pDataPath = None
pLBPHPath = None
pNamePath = None
detcMethod = 0 # 0 HAAR, 1 DNN, 2 BOTH
failTimeout = 10
memoryTimeout = 5
lastAccess = False
preparedYet = False
keepWebcamOpen = None
chosenCam = 0
detcThread = None
detcRun = threading.Event()
detcLock = threading.Lock()
# Prepares face-data
def facePrepare(retake = False, overrideTimeout = 0):
global masmPath
global pDataPath
global pLBPHPath
global pNamePath
global detcMethod
global preparedYet
global memoryTimeout
global keepWebcamOpen
# If recreation is desired, remove old data
if retake is True:
pLBPHPath.unlink(missing_ok = True)
pNamePath.unlink(missing_ok = True)
# face-data path does not exist, create it
if not pDataPath.exists():
pDataPath.mkdir(parents = True, exist_ok = True)
# existing facial data exists, load the data
if pLBPHPath.exists() and pNamePath.exists() and not preparedYet:
print("Loading face-data")
Facer.load_trained_lbph(str(pLBPHPath), str(pNamePath))
preparedYet = True
else: # no existing data or update
if not retake and pLBPHPath.exists() and pNamePath.exists():
print("Updating with new data..")
if pLBPHPath.stat().st_size > 100000000 * (memoryTimeout / 5): # Limit max memory size
print("Memory size limit reached, re-memorizing instead..")
pLBPHPath.unlink(missing_ok = True)
pNamePath.unlink(missing_ok = True)
overrideTimeout = 0 # We want larger chunk of initial data, reset override
retake = True
else:
print("No face-data found, taking..")
MASM.sendData("FDAR_NOPREPAREDATA")
chosenTimeout = memoryTimeout
if overrideTimeout > 0:
chosenTimeout = overrideTimeout
try:
if not keepWebcamOpen and not Facer.camOn():
print("Camera failed to open")
if not Facer.camOff():
print("Camera failed to close?")
return False
takeDNN = False
if detcMethod == 1 or detcMethod == 2:
takeDNN = True
if not Facer.take_faces("Player", count = 0, timeout = chosenTimeout, recreate = retake, useDNN = takeDNN, minLightLevel = 15):
if not keepWebcamOpen and not Facer.camOff():
print("Camera failed to close?")
return False
if not keepWebcamOpen and not Facer.camOff():
print("Camera failed to close?")
except Facer.LightLevelLow:
if not keepWebcamOpen and not Facer.camOff():
print("Camera failed to close?")
raise
except Facer.NoFacesFound:
print(f"Face couldn't be found within {chosenTimeout*2}s")
if not keepWebcamOpen and not Facer.camOff():
print("Camera failed to close?")
return False
except Exception as e:
print(f"Exception on taking data: {e}")
if not keepWebcamOpen and not Facer.camOff():
print("Camera failed to close?")
return False
try:
if not Facer.train_faces_lbph(recreate = retake):
MASM.sendData("FDAR_FAILURE")
return False
except Exception as e:
print(f"Exception on train: {e}")
MASM.sendData("FDAR_FAILURE")
try:
Facer.save_trained_lbph(str(pLBPHPath), str(pNamePath))
except Exception as e:
print(f"Exception on save: {e}")
MASM.sendData("FDAR_FAILURE")
preparedYet = True
return True
# Data not prepared exception
class DataNotPrepared(Exception):
pass
threshold = 0.6
methodSwitch = False
# Recognizes all known people
# Returns list of recognized names
def recognizeKnown():
global threshold
global detcMethod
global preparedYet
global methodSwitch
if not preparedYet:
print("Tried to recognize before data is prepared")
raise DataNotPrepared
else:
try:
Facer.camClearBuffer()
frame = Facer.camFrame(minLightLevel = 15)
except Facer.LightLevelLow:
raise
except Exception as e:
print(f"Capture frame exception: {e}")
MASM.sendData("FDAR_FAILURE")
return None
else:
try:
if detcMethod == 0:
methodSwitch = False
elif detcMethod == 1:
methodSwitch = True
elif detcMethod == 2:
methodSwitch = not methodSwitch
found, people = Facer.recognize_faces_lbph(frame, threshold, methodSwitch)
except Exception as e:
print(f"LBPH recognizing exception: {e}")
                #MASM.sendData("FDAR_FAILURE")  # Disabled: this call intermittently raised an exception with a random error code; recognition still works without it
return None
else:
if found:
knownFound = []
for person in people:
if person[0] is None:
#print("Found someone")
#knownFound.append("FDAR_SOMEONE")
# raise the threshold slowly to recognize person eventually
if threshold < 0.8:
threshold += 0.05
else:
print(f"Found {person[0]}")
knownFound.append(person[0])
if threshold > 0.6: # Keep threshold somewhere around where person can be detected
threshold -= 0.06
return knownFound
else:
print("Found nobody")
return None
MASM.sendData("FDAR_FAILURE")
return None
# Non-blocking recognition loop
def _recognizeLoop():
global detcRun
global lastAccess
global preparedYet
global failTimeout
global keepWebcamOpen
if not preparedYet:
print("Not prepared yet")
try:
if not facePrepare():
print("Failed to prepare data")
MASM.sendData("FDAR_FAILURE")
else:
MASM.sendData("FDAR_MEMORIZE_DONE")
except Facer.LightLevelLow:
print("Low-light on prepare")
MASM.sendData("FDAR_MEMORIZE_LOWLIGHT")
except Exception as e:
print(f"Exception when preparing: {e}")
MASM.sendData("FDAR_FAILURE")
return
lastTime = time.time()
while not detcRun.is_set():
toMemorize = MASM.hasDataValue("FDAR_MEMORIZE")
if toMemorize is not None and lastAccess:
try:
(removeOld, override) = toMemorize
if removeOld:
preparedYet = False
if not facePrepare(retake = removeOld, overrideTimeout = override):
print("Failed to memorize")
MASM.sendData("FDAR_FAILURE")
else:
MASM.sendData("FDAR_MEMORIZE_DONE")
except Facer.LightLevelLow:
print("Low-light on memorize")
MASM.sendData("FDAR_MEMORIZE_LOWLIGHT")
except Exception as e:
print(f"Exception on memorize: {e}")
MASM.sendData("FDAR_FAILURE")
shouldRecognize = False
toRecognize = MASM.hasDataValue("FDAR_RECOGNIZEONCE")
if toRecognize is not None and lastAccess:
if not preparedYet:
print("Memory not prepared for recognition")
MASM.sendData("FDAR_NOTMEMORIZED")
else:
shouldRecognize = True
if shouldRecognize:
if not keepWebcamOpen and not Facer.camOn():
print("Camera failed to open")
MASM.sendData("FDAR_FAILURE")
shouldRecognize = False
else:
startTime = time.time()
while time.time() - startTime < failTimeout:
if MASM.hasDataBool("FDAR_RECOGNIZESTOP"):
shouldRecognize = False
break
elif time.time() - lastTime > 1.0: # Ease up on loop, attempt every second
try:
res = recognizeKnown()
except Facer.LightLevelLow:
print("Low-light on recognize")
MASM.sendData("FDAR_LOWLIGHT") # No breaking here so we can fail eventually as we want to keep trying
except DataNotPrepared:
shouldRecognize = False
break # We don't want to deal with this here.. Trust me I tried
except Exception as e:
print(f"Recognizing known exception: {e}")
MASM.sendData("FDAR_FAILURE")
shouldRecognize = False
break
else:
if res is not None:
for recognized in res:
MASM.sendData("FDAR_RECOGNIZED", recognized)
if toRecognize in res:
shouldRecognize = False
break
lastTime = time.time()
else:
time.sleep(0.1)
if not keepWebcamOpen and not Facer.camOff():
print("Camera failed to close?")
if MASM.hasDataBool("FDAR_RECOGNIZESTOP"):
pass # Clear this so next recognitions won't fail immediately if duplicate data is received
time.sleep(1) # No hogging CPU and data-locks!
def Update():
global detcRun
global chosenCam
global detcMethod
global lastAccess
global detcThread
global preparedYet
global failTimeout
global memoryTimeout
global keepWebcamOpen
# TODO: Recognize multiple people?
if MASM.hasDataCheck("FDAR_KEEPOPEN", bool):
newKeepOpen = MASM.hasDataValue("FDAR_KEEPOPEN")
if lastAccess:
if keepWebcamOpen and not newKeepOpen and not Facer.camOff():
print("Camera failed to close?")
elif not keepWebcamOpen and newKeepOpen:
if not Facer.camOn():
print("Camera failed to open")
MASM.sendData("FDAR_FAILURE")
else:
Facer.camFrame() # Turn on light
MASM.sendData("FDAR_CAMON")
keepWebcamOpen = newKeepOpen
if MASM.hasDataCheck("FDAR_SETTIMEOUT", int):
if (newVal := MASM.hasDataValue("FDAR_SETTIMEOUT", 0)) > 0:
failTimeout = newVal
if MASM.hasDataCheck("FDAR_SETMEMORYTIMEOUT", int):
if (newVal := MASM.hasDataValue("FDAR_SETMEMORYTIMEOUT", 0)) > 0:
memoryTimeout = newVal
method = MASM.hasDataValue("FDAR_DETECTIONMETHOD")
if method is not None:
if method == "HAAR":
detcMethod = 0
elif method == "DNN":
detcMethod = 1
elif method == "BOTH":
detcMethod = 2
if MASM.hasDataBool("FDAR_GETCAMS"):
if keepWebcamOpen:
Facer.camOff()
MASM.sendData("FDAR_CAMSLIST", Facer.getCams())
if keepWebcamOpen:
Facer.camOn()
Facer.camFrame()
if MASM.hasDataCheck("FDAR_SETCAM", int):
chosenCam = MASM.hasDataValue("FDAR_SETCAM", 0)
if MASM.hasDataBool("FDAR_TESTCAM"):
try:
if keepWebcamOpen:
Facer.camOff()
Facer.camOn(id = chosenCam)
Facer.camFrame()
else:
Facer.camOn(id = chosenCam)
Facer.camFrame()
time.sleep(3) # Yes, bad
Facer.camOff()
except Exception as e:
print(f"Error wat: {e}")
# Message tells whether we are allowed to recognize or not
allowAccess = MASM.hasDataValue("FDAR_ALLOWACCESS")
if allowAccess is True and allowAccess != lastAccess:
try:
print("Recognition allowed")
if keepWebcamOpen and not Facer.camOn():
print("Camera failed to open")
else:
if keepWebcamOpen:
Facer.camFrame() # Turn on light with empty read
detcRun.clear()
if detcThread is None:
detcThread = threading.Thread(target = _recognizeLoop)
detcThread.start()
lastAccess = allowAccess
except Exception as e:
print(f"Exception to start recognition thread: {e}")
if not Facer.camOff(): # Just in case
print("Camera failed to close?")
elif allowAccess is False and allowAccess != lastAccess:
try:
print("Recognition not allowed")
detcRun.set()
if detcThread is not None:
detcThread.join()
detcThread = None
if not Facer.camOff():
print("Camera failed to close?")
lastAccess = allowAccess
preparedYet = False # So we can re-check for data existence
except Exception as e:
print(f"Exception to stop recognition thread: {e}")
if not Facer.camOff(): # Just in case as well
print("Camera failed to close?")
def Start():
global masmPath
global pDataPath
global pLBPHPath
global pNamePath
global detcThread
# Setup some paths
masmPath = os.path.dirname(os.path.realpath(__file__)) # Get our full path
pDataPath = pathlib.Path(masmPath)/"face-data" # Data folder
pLBPHPath = pDataPath/"data-lbph.xml" # Data file
pNamePath = pDataPath/"data-names.pkl" # Names file
# Create thread
detcThread = threading.Thread(target = _recognizeLoop)
def OnQuit():
global detcRun
global detcThread
detcRun.set()
detcThread.join()
Facer.camOff()
|
from docs_snippets_crag.concepts.configuration.configured_named_op_example import datasets
def test_job():
result = datasets.to_job().execute_in_process(
run_config={
"ops": {
"sample_dataset": {"inputs": {"xs": [4, 8, 15, 16, 23, 42]}},
"full_dataset": {
"inputs": {"xs": [33, 30, 27, 29, 32, 30, 27, 28, 30, 30, 30, 31]}
},
}
}
)
sample_dataset = result.output_for_node("sample_dataset")
full_dataset = result.output_for_node("full_dataset")
assert len(sample_dataset) == 5
assert len(full_dataset) == 12
|
from .lit import LIT
def build_model(config):
model_type = config.MODEL.TYPE
if model_type == 'lit':
model = LIT(img_size=config.DATA.IMG_SIZE,
patch_size=config.MODEL.LIT.PATCH_SIZE,
in_chans=config.MODEL.LIT.IN_CHANS,
num_classes=config.MODEL.NUM_CLASSES,
embed_dim=config.MODEL.LIT.EMBED_DIM,
depths=config.MODEL.LIT.DEPTHS,
num_heads=config.MODEL.LIT.NUM_HEADS,
mlp_ratio=config.MODEL.LIT.MLP_RATIO,
qkv_bias=config.MODEL.LIT.QKV_BIAS,
qk_scale=config.MODEL.LIT.QK_SCALE,
drop_rate=config.MODEL.DROP_RATE,
drop_path_rate=config.MODEL.DROP_PATH_RATE,
ape=config.MODEL.LIT.APE,
patch_norm=config.MODEL.LIT.PATCH_NORM,
use_checkpoint=config.TRAIN.USE_CHECKPOINT)
else:
        raise NotImplementedError(f"Unknown model: {model_type}")
return model
|
# -*- coding: utf-8 -*-
# @File : models.py
# @Author : AaronJny
# @Time : 2020/03/26
# @Desc :
import tensorflow as tf
from dataset import tokenizer
import settings
model = tf.keras.Sequential([
tf.keras.layers.Input((None,)),
tf.keras.layers.Embedding(input_dim=tokenizer.vocab_size, output_dim=128),
tf.keras.layers.LSTM(64, return_sequences=True),
tf.keras.layers.LSTM(64),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(1, activation=tf.keras.activations.sigmoid)
])
model.summary()
EPSILON = 1e-07
def recall_m(y_true, y_pred):
"""
    Compute recall.
"""
true_positives = tf.reduce_sum(tf.round(tf.clip_by_value(y_true * y_pred, 0, 1)))
possible_positives = tf.reduce_sum(tf.round(tf.clip_by_value(y_true, 0, 1)))
recall = true_positives / (possible_positives + EPSILON)
return recall
def precision_m(y_true, y_pred):
"""
    Compute precision.
"""
true_positives = tf.reduce_sum(tf.round(tf.clip_by_value(y_true * y_pred, 0, 1)))
predicted_positives = tf.reduce_sum(tf.round(tf.clip_by_value(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + EPSILON)
return precision
def f1_m(y_true, y_pred):
"""
    Compute the F1 score.
"""
precision = precision_m(y_true, y_pred)
recall = recall_m(y_true, y_pred)
return 2 * ((precision * recall) / (precision + recall + EPSILON))
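# Quick illustration of the metrics above (not part of the original training script): with
# y_true = [1, 0, 1, 1] and y_pred = [0.9, 0.2, 0.4, 0.8], rounding the clipped products gives
# 2 true positives against 3 actual positives and 2 predicted positives, so recall = 2/3,
# precision = 1.0 and f1 = 0.8 (up to the EPSILON smoothing).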
model.compile(optimizer=tf.keras.optimizers.Adam(settings.LEARNING_RATE), loss=tf.keras.losses.binary_crossentropy,
metrics=['accuracy', f1_m, precision_m, recall_m])
|
from plotly.graph_objs import Parcoords
|
from mirari.mirari.admin import *
from .models import *
from .vars import *
@admin.register(Sellpoint)
class SellpointAdmin(PassAdmin):
pass
@admin.register(Menu)
class MenuAdmin(PassAdmin):
pass
@admin.register(Product)
class ProductAdmin(PassAdmin):
pass
@admin.register(ProductAttributes)
class ProductAttributesAdmin(PassAdmin):
pass
@admin.register(Ticket)
class TicketAdmin(PassAdmin):
list_display = ('sellpoint','barcode', 'key','rasurado','total',)
search_fields = ('key', 'barcode')
@admin.register(TicketProducts)
class TicketProductsAdmin(PassAdmin):
pass
@admin.register(Cut)
class CutAdmin(PassAdmin):
list_display = ('sellpoint',)
@admin.register(Offer)
class OfferAdmin(PassAdmin):
pass
@admin.register(ClientProfile)
class ClientProfileAdmin(PassAdmin):
pass
@admin.register(Client)
class ClientAdmin(PassAdmin):
pass
@admin.register(SellpointGroups)
class SellpointGroupsAdmin(PassAdmin):
pass
|
from .models import BirthdayWRandomNumberExt
from datetime import datetime
from django.core.urlresolvers import reverse
from django.db import IntegrityError
from django.test import TestCase
#
# Model Tests
#
class BirthdayWRandomNumberExtTests(TestCase):
def test_must_have_birthday(self):
'''Test that the birthday cannot be null.
'''
usr = BirthdayWRandomNumberExt()
self.assertRaises(IntegrityError, usr.save)
def test_is_thirteen(self):
'''Make sure is_thirteen returns True if thirteen.
'''
# Should be coded such that the values for 13 change over
# time, but overkill for this.
usr = BirthdayWRandomNumberExt(birthday=datetime.date(datetime(*(2000, 01, 01))))
usr.save()
self.assertTrue(usr.is_thirteen())
usr = BirthdayWRandomNumberExt(birthday=datetime.date(datetime(*(2020, 01, 01))))
usr.save()
self.assertFalse(usr.is_thirteen())
def test_random_number_is_generated(self):
'''Test that the random generator is working and not leaving
random_number_field as 0.
'''
usr = BirthdayWRandomNumberExt(birthday='2000-01-01')
usr.save()
self.failIfEqual(usr.random_number_field, 0)
def test_bizz_fuzz(self):
'''Test bizz_fuzz returns expected values.
'''
usr = BirthdayWRandomNumberExt(birthday=datetime.date(datetime(*(2000, 01, 01))))
usr.save()
# Multiple of 3 and 5
usr.random_number_field = 15
self.assertEqual(usr.bizz_fuzz(), 'BizzFuzz')
# Multiple of 3
usr.random_number_field = 6
self.assertEqual(usr.bizz_fuzz(), 'Bizz')
# Multiple of 5
usr.random_number_field = 10
self.assertEqual(usr.bizz_fuzz(), 'Fuzz')
        # Neither a multiple of 3 nor of 5
usr.random_number_field = 1
self.assertEqual(usr.bizz_fuzz(), 1)
def test_username_uniqueness(self):
'''Test that usernames are always unique.
'''
usr1 = BirthdayWRandomNumberExt(birthday=datetime.date(datetime(*(2000, 01, 01))))
usr1.save()
usr2 = BirthdayWRandomNumberExt(birthday=datetime.date(datetime(*(2001, 01, 01))))
usr2.save()
usr3 = BirthdayWRandomNumberExt(birthday=datetime.date(datetime(*(2002, 01, 01))))
usr3.save()
usr2.delete()
usr4 = BirthdayWRandomNumberExt(birthday=datetime.date(datetime(*(2002, 01, 01))))
try:
usr4.save()
self.assertTrue(True, 'Username uniqueness enforced.')
except IntegrityError:
self.fail('Username not unique')
#
# View Tests
#
class ViewTests(TestCase):
def test_endpoint_200s(self):
'''Ensure that all endpoints return 200.
'''
for endpoint in ['index', 'add_user', 'csv_data']:
print '\n\n'+reverse('br_users:%s' % endpoint)
response = self.client.get(reverse('br_users:%s' % endpoint))
self.assertEqual(response.status_code, 200, 'For %s' % endpoint)
usr = BirthdayWRandomNumberExt(birthday='2014-10-12')
usr.save()
for endpoint in ['user_info', 'edit_user', 'delete_user']:
print '\n\n'+reverse('br_users:%s' % endpoint, kwargs={'pk': 1})
            response = self.client.get(reverse('br_users:%s' % endpoint, args=('1',)))
print endpoint
self.assertEqual(response.status_code, 200)
|
import streamlit as st
import streamlit.components.v1 as components
def app():
components.html(
'''
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Find Homes</title>
</head>
<body>
<div id="zillow-large-search-box-widget-container" style="width:432px;overflow:hidden;background-color:#e7f1fd;color:#555; font: normal normal normal 13px verdana,arial,sans-serif;line-height:13px;margin:0 auto;padding:0;text-align:center;border:1px solid #adcfff;letter-spacing:0;text-transform:none;"><h2 style="color:#d61;text-align:left;font-size:20px;line-height:20px;font-weight:normal;float:left;width:200px;margin-left:10px;margin-top:5px;letter-spacing:0;text-transform:none;">Find Homes</h2><div style="float:right;"><a href="https://www.zillow.com/" target="_blank" rel="nofollow"><img alt="Zillow Real Estate Information" style="border:0;" src="https://www.zillow.com/widgets/GetVersionedResource.htm?path=/static/images/powered-by-zillow.gif"></img></a></div><iframe scrolling="no" src="https://www.zillow.com/widgets/search/LargeSearchBoxWidget.htm?did=zillow-large-search-box-iframe-widget&type=iframe&rgname=Seattle+WA&shvi=yes" width="430" frameborder="0" height="400"></iframe><table id="zillow-tnc-widget-footer-links" width="100%" style="font: normal normal normal 10px verdana,arial,sans-serif;text-align:left;line-height:12px;margin:10px 5px;padding:0;border-spacing:0;border-collapse:collapse;"><tbody style="margin:0;padding:0;"><tr style="margin:0;padding:0;"><td style="font-weight:bold;font-size:10px;color:#555;text-align:left;margin:0;padding:0;">QUICK LINKS:</td></tr><tr style="margin:0;padding:0;"><td style="margin:0;padding:0;"><span id="widgetFooterLink" class="regionBasedLink"><a href="https://www.zillow.com/seattle-wa/" target="_blank" rel="nofollow" style="color:#36b;font-family:verdana,arial,sans-serif;font-size:10px;margin:0 5px 0 0;padding:0;text-decoration:none;"><span class="region">Seattle</span> Real Estate Listing</a></span></td><td style="margin:0;padding:0;"><span id="widgetFooterLink"><a href="https://www.zillow.com/mortgage-rates/" target="_blank" rel="nofollow" style="color:#36b;font-family:verdana,arial,sans-serif;font-size:10px;margin:0 5px 0 0;padding:0;text-decoration:none;">Mortgage Rates</a></span></td><td style="margin:0;padding:0;"><span id="widgetFooterLink"><a href="https://www.zillow.com/refinance/" target="_blank" rel="nofollow" style="color:#36b;font-family:verdana,arial,sans-serif;font-size:10px;margin:0 5px 0 0;padding:0;text-decoration:none;">Refinancing</a></span></td></tr><tr style="margin:0;padding:0;"><td style="margin:0;padding:0;"><span id="widgetFooterLink" class="regionBasedLink"><a href="https://www.zillow.com/seattle-wa/foreclosures/" target="_blank" rel="nofollow" style="color:#36b;font-size:10px;margin:0 5px 0 0;padding:0;text-decoration:none;"><span class="region">Seattle</span> Foreclosures</a></span></td><td style="margin:0;padding:0;"><span id="widgetFooterLink"><a href="https://www.zillow.com/mortgage-calculator/" target="_blank" rel="nofollow" style="color:#36b;font-size:10px;margin:0 5px 0 0;padding:0;text-decoration:none;">Mortgage Calculators</a></span></td><td style="margin:0;padding:0;"><span id="widgetFooterLink"><a href="https://www.zillow.com/mortgage-rates/" target="_blank" rel="nofollow" style="color:#36b;font-size:10px;margin:0 5px 0 0;padding:0;text-decoration:none;">Purchase Loans</a></span></td></tr></tbody></table></div>
</body>
''',
height=600)
|
# https://adventofcode.com/2021/day/6
def solve(timers, days):
if len(timers) == 0:
return 0
for day in range(0, days):
for i in range(0, len(timers)):
if timers[i] == 0:
timers[i] = 6 # reset timers
timers.append(8) # newborn lanternfish
else:
timers[i] -= 1
return len(timers)
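# Illustrative alternative (not used by this script): simulating every fish individually grows
# exponentially with the number of days, while counting how many fish share each timer value
# keeps the state at a fixed 9 numbers and scales to much longer simulations.
def solve_counts(timers, days):
    counts = [0] * 9
    for t in timers:
        counts[t] += 1
    for _ in range(days):
        spawning = counts.pop(0)  # fish at timer 0 spawn this day; all other timers shift down
        counts[6] += spawning     # parents reset to 6
        counts.append(spawning)   # newborns start at 8
    return sum(counts)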
def input_processing(content):
return [int(val.strip()) for val in content.split(',')]
if __name__ == '__main__':
f = open('input.txt')
timers = input_processing(f.read())
days = 80
res = solve(timers, days)
print(res)
|
"""
---> Car Pooling
---> Medium
"""
from heapq import *
class Solution:
def carPooling(self, trips, capacity: int) -> bool:
trips = sorted(trips, key=lambda x: x[1])
heap = []
for (c, f, t) in trips:
while heap and heap[0][0] <= f:
v = heappop(heap)[1]
capacity += v
if c > capacity:
return False
else:
capacity -= c
heappush(heap, (t, c))
return True
def carPooling_sol2(self, trips, capacity: int) -> bool:
all_trips = []
for trip in trips:
all_trips.append((trip[1], trip[0]))
all_trips.append((trip[2], -trip[0]))
all_trips.sort()
count = 0
for i in all_trips:
count += i[1]
if count > capacity:
return False
return True
in_trips = [[2, 1, 5], [3, 3, 7]]
in_capacity = 5
a = Solution()
print(a.carPooling(in_trips, in_capacity))
print(a.carPooling_sol2(in_trips, in_capacity))
"""
Approach 1:
Sort the trips by pickup point. For each trip, first pop every heap entry whose drop-off point is at or
before the current pickup point and restore those passengers to the remaining capacity; then, if the
current trip's passengers exceed the remaining capacity, return False, otherwise subtract them and push
(drop-off point, passengers) onto the heap.
Approach 2:
Split each trip into two events, (start point, +passengers) and (end point, -passengers), sort all events,
and sweep through them with a running passenger count; if the count ever exceeds the capacity, return False.
Reference - https://leetcode.com/problems/car-pooling/discuss/1596704/Python-Min-Heap
"""
|
#!/usr/bin/env python
# coding: utf-8
import sys
import numpy as np
from scipy.linalg import logm
from scipy import sparse
import scipy.linalg as linalg
import statistics
import utils
from numba import njit
import qutip as qt
sys.path.append("/home/felipe/git-projects/syk-nonergodic/")
def _ptrace_dense(Q, dims, sel):
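    """Dense partial trace of the density matrix ``Q``: keep the subsystems listed in ``sel``
    and trace out the rest, returning the reduced density matrix as an ndarray."""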
rd = np.asarray(dims[0], dtype=np.int32).ravel()
nd = len(rd)
if isinstance(sel, int):
sel = np.array([sel])
else:
sel = np.asarray(sel)
sel = list(np.sort(sel))
dkeep = (rd[sel]).tolist()
qtrace = list(set(np.arange(nd)) - set(sel))
dtrace = (rd[qtrace]).tolist()
rd = list(rd)
rhomat = np.trace(
Q.reshape(rd + rd)
.transpose(qtrace + [nd + q for q in qtrace] + sel + [nd + q for q in sel])
.reshape([np.prod(dtrace), np.prod(dtrace), np.prod(dkeep), np.prod(dkeep)])
)
return rhomat
def entropy(dm):
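    """Von Neumann entropy S = -Tr(rho log rho), computed from the eigenvalues of the
    density matrix (near-zero eigenvalues are dropped)."""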
vals = np.linalg.eigvalsh(dm)
posvals = vals[vals > 1e-10]
logvals = np.log(posvals)
return float(np.real(-sum(posvals * logvals)))
@njit
def generate_observable(na):
"""Generates a fixed random observable for each value of N_A.
"""
na = np.uint32(na)
np.random.seed(na)
dima = 2 ** na
observable = np.zeros((dima, dima), dtype=np.complex64)
for i in range(dima):
observable[i, i] = np.random.normal()
for j in range(i + 1, dima):
observable[i, j] = (np.random.normal() + 1j * np.random.normal()) / np.sqrt(
2
)
observable[j, i] = np.conj(observable[i, j])
return observable
def main(psiarr, N):
num_permutations = 3
neff = N - 1
numpoints, numsamples, dim = psiarr.shape
na_range = np.arange(1, neff)
# numpoints = 13
# numsamples = 100
R = np.zeros((numpoints, numsamples, neff - 1), dtype=np.float64)
# Iterate over values of disorder.
for i in range(numpoints):
# For each disorder value, iterate over different samples.
for j in range(numsamples):
# Get a single eigenvector
psi = psiarr[i, j]
# Build full density matrix from eigenvector.
fulldm = np.outer(psi, psi.conj())
# Set dims for density matrix to enable partial trace.
dims = [[2] * neff, [2] * neff]
rng = np.random.default_rng()
            # iterate over the subsystem sizes na
for k, na in enumerate(na_range):
observable = generate_observable(na)
val = 0
for p in range(num_permutations):
# Randomly choose na sites from the total N-1 to keep after ptrace
sites_a = np.sort(rng.choice(neff, na, replace=False))
# rdm = fulldm.ptrace(sites_a)
rdm = _ptrace_dense(fulldm, dims, sites_a)
val += np.real(np.trace(observable.dot(rdm))) / num_permutations
R[i, j, k] = val
return R
if __name__ == "__main__":
N = 7
MINSEED = 0
MAXSEED = 10
neff = N - 1
dim = int(2 ** neff)
# Read data from file and select only center of band
delta, eigval, eigvec = statistics.read_eigenspectrum(N, MINSEED, MAXSEED)
numpoints = len(delta)
cob_eigval, cob_eigvec = statistics.get_center_of_band(eigval, eigvec)
cob_eigvec = np.transpose(cob_eigvec, axes=[0, 1, 2, 4, 3])
full_eigvec = np.transpose(eigvec, axes=[0, 1, 2, 4, 3])
# This is structured as (numpoints, numsamples, dim).
# `numpoints` is the number of disorder points,
# `numsamples` is the number of samples for each disorder strength,
# and `dim` is the dimension of the full Hilbert space before bipartition.
# psiarr = cob_eigvec.reshape(numpoints, -1, dim)
psiarr = full_eigvec.reshape(numpoints, -1, dim)
earr = eigval.reshape(numpoints, -1)
# delta, avg_see, avg_traces = main(psiarr=psiarr, N=N)
# numpoints = avg_see.shape[0]
# file_name = f"data/entropy/entropy_N{N}.npz"
# na_range = np.arange(1, N - 1)
# np.savez_compressed(
# file_name, delta= _traces
# )
# rdm = fulldm.ptrace(sites_a)
|
reversed_letters = {
'A' : 'A',
'E' : '3',
'H' : 'H',
'I' : 'I',
'J' : 'L',
'L' : 'J',
'M' : 'M',
'O' : 'O',
'S' : '2',
'T' : 'T',
'U' : 'U',
'V' : 'V',
'W' : 'W',
'X' : 'X',
'Y' : 'Y',
'Z' : '5',
'1' : '1',
'2' : 'S',
'3' : 'E',
'5' : 'Z',
'8' : '8'
}
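# Example (illustrative): "3AIAE" reversed is "EAIA3"; mapping each character through the table
# gives "3AIAE" again, so it is a mirrored string even though it is not a palindrome.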
from sys import stdin, stdout
while True:
try:
string = input()
# Ugly version with early break
# is_palin = True
# is_mirrored = True
# i = 0
# j = len(string) - 1
# while i < j:
# if string[i] != string[j]:
# is_palin = False
# if (string[i] not in reversed_letters) or (string[j] != reversed_letters[string[i]]) \
# or (string[j] not in reversed_letters) or (string[i] != reversed_letters[string[j]]):
# is_mirrored = False
# i += 1
# j -= 1
# if not is_mirrored and not is_palin:
# break
# if is_mirrored and len(string) & 1 == 1:
# c_m = string[len(string) // 2]
# if (c_m not in reversed_letters) or (c_m != reversed_letters[c_m]):
# is_mirrored = False
# Short version
is_palin = string == string[::-1]
        is_mirrored = string == "".join([reversed_letters.get(ch, "") for ch in string[::-1]])
outcome = "is not a palindrome" if (not is_palin and not is_mirrored) else \
"is a regular palindrome" if (not is_mirrored and is_palin) else \
"is a mirrored string" if (not is_palin and is_mirrored) else \
"is a mirrored palindrome"
print("{} -- {}.\n".format(string, outcome))
except(EOFError):
break
|
from __future__ import annotations
import pytest
from poetry.core.packages.specification import PackageSpecification
@pytest.mark.parametrize(
"spec1, spec2, expected",
[
(PackageSpecification("a"), PackageSpecification("a"), True),
(PackageSpecification("a", "type1"), PackageSpecification("a", "type1"), True),
(PackageSpecification("a", "type1"), PackageSpecification("a", "type2"), False),
(PackageSpecification("a"), PackageSpecification("a", "type1"), False),
(PackageSpecification("a", "type1"), PackageSpecification("a"), False),
],
)
def test_is_same_package_source_type(
spec1: PackageSpecification,
spec2: PackageSpecification,
expected: bool,
) -> None:
assert spec1.is_same_package_as(spec2) == expected
@pytest.mark.parametrize(
("source_type", "result"),
[
("directory", True),
("file", True),
("url", True),
("git", True),
("legacy", False),
(None, False),
],
)
def test_is_direct_origin(source_type: str | None, result: bool) -> None:
assert PackageSpecification("package", source_type).is_direct_origin() == result
@pytest.mark.parametrize(
"spec1, spec2, expected",
[
(PackageSpecification("a"), PackageSpecification("a"), True),
(PackageSpecification("a"), PackageSpecification("b"), False),
(PackageSpecification("a", features=["x"]), PackageSpecification("a"), True),
(
PackageSpecification("a", features=["x"]),
PackageSpecification("a", features=["x"]),
True,
),
(
PackageSpecification("a", features=["x"]),
PackageSpecification("b", features=["x"]),
False,
),
(
PackageSpecification("a", features=["x"]),
PackageSpecification("a", features=["y"]),
False,
),
(
PackageSpecification("a", features=["x"]),
PackageSpecification("a", features=["x", "y"]),
False,
),
(
PackageSpecification("a", features=["x", "y"]),
PackageSpecification("a", features=["x"]),
True,
),
],
)
def test_specification_provides(
spec1: PackageSpecification,
spec2: PackageSpecification,
expected: bool,
) -> None:
assert spec1.provides(spec2) == expected
@pytest.mark.parametrize(
"spec1, spec2",
[
(
# nothing except for name and features matters if no source
PackageSpecification("a", None, "url1", "ref1", "resref1", "sub1"),
PackageSpecification("a", None, "url2", "ref2", "resref2", "sub2"),
),
(
# ref does not matter if resolved ref is equal
PackageSpecification("a", "type", "url", "ref1", "resref1"),
PackageSpecification("a", "type", "url", "ref2", "resref1"),
),
(
# resolved ref does not matter if no ref
PackageSpecification("a", "type", "url", None, "resref1"),
PackageSpecification("a", "type", "url", None, "resref2"),
),
(
# resolved ref unset when ref starts with other
PackageSpecification("a", "type", "url", "ref/a", "resref1"),
PackageSpecification("a", "type", "url", "ref", None),
),
(
# resolved ref unset when ref starts with other
PackageSpecification("a", "type", "url", "ref/a", None),
PackageSpecification("a", "type", "url", "ref", "resref2"),
),
],
)
def test_equal_specifications_have_same_hash(
spec1: PackageSpecification, spec2: PackageSpecification
) -> None:
assert spec1 == spec2
assert spec2 == spec1
assert hash(spec1) == hash(spec2)
|
import unittest
import struct
import pytest
import pytds
from pytds.smp import *
from utils import MockSock
smp_hdr = struct.Struct('<BBHLLL')
class SmpSessionsTests(unittest.TestCase):
def setUp(self):
self.sock = MockSock()
self.mgr = SmpManager(self.sock)
self.sess = self.mgr.create_session()
self.buf = bytearray(b'0' * 100)
self.sock.consume_output()
def test_valid_data(self):
self.sock.set_input([smp_hdr.pack(0x53, 8, 0, len(b'test') + 16, 1, 10) + b'test'])
l = self.sess.recv_into(self.buf)
assert self.buf[:l] == b'test'
def test_invalid_flags(self):
self.sock.set_input([smp_hdr.pack(0x53, 16, 0, 16, 1, 10)])
with pytest.raises(pytds.Error) as excinfo:
self.sess.recv_into(self.buf)
assert 'Unexpected FLAGS' in str(excinfo.value)
def test_syn_packet(self):
"""
Server should not send SYN packets to a client, only client can send those
"""
self.sock.set_input([smp_hdr.pack(0x53, 1, 0, 16, 1, 10)])
with pytest.raises(pytds.Error) as excinfo:
self.sess.recv_into(self.buf)
assert 'Unexpected SYN' in str(excinfo.value)
def test_data_after_fin(self):
sess2 = self.mgr.create_session()
self.sock.set_input([smp_hdr.pack(0x53, 4, 0, 16, 1, 10) + smp_hdr.pack(0x53, 8, 0, 16, 2, 10)])
assert self.sess.recv_into(self.buf) == 0
with pytest.raises(pytds.Error) as excinfo:
sess2.recv_into(self.buf)
assert 'Unexpected DATA packet from server' in str(excinfo.value)
def test_fin_after_fin(self):
sess2 = self.mgr.create_session()
self.sock.set_input([smp_hdr.pack(0x53, 4, 0, 16, 1, 10) + smp_hdr.pack(0x53, 4, 0, 16, 2, 10)])
assert self.sess.recv_into(self.buf) == 0
with pytest.raises(pytds.Error) as excinfo:
sess2.recv_into(self.buf)
assert 'Unexpected FIN' in str(excinfo.value)
def test_data_after_close(self):
"""should ignore data sent from server if we already send FIN packet"""
self.sock.set_input([smp_hdr.pack(0x53, 8, 0, len(b'test') + 16, 1, 10) + b'test' +
smp_hdr.pack(0x53, 4, 0, 16, 1, 10)])
assert self.sess.get_state() == SessionState.SESSION_ESTABLISHED
self.sess.close()
assert self.sess.get_state() == SessionState.CLOSED
assert self.sess.recv_into(self.buf) == 0
def test_close_twice(self):
# this test is optional, maybe it does not behave like that
self.sock.set_input([smp_hdr.pack(0x53, 4, 0, 16, 1, 10)])
self.sess.close()
self.sess.close()
def test_ack_after_fin(self):
sess2 = self.mgr.create_session()
self.sock.set_input([smp_hdr.pack(0x53, 4, 0, 16, 1, 10) + smp_hdr.pack(0x53, 2, 0, 16, 2, 10)])
assert self.sess.recv_into(self.buf) == 0
with pytest.raises(pytds.Error) as excinfo:
sess2.recv_into(self.buf)
assert 'Unexpected ACK packet from server' in str(excinfo.value)
def test_unexpected_eof(self):
"""
Should raise EOF error if socket does not have enough data to fill SMP header
"""
self.sock.set_input([b'0' * 10])
with pytest.raises(pytds.Error) as excinfo:
self.sess.recv_into(self.buf)
assert 'Unexpected EOF' in str(excinfo.value)
def test_invalid_id(self):
self.sock.set_input([smp_hdr.pack(0, 4, 0, 16, 1, 10)])
with pytest.raises(pytds.Error) as excinfo:
self.sess.recv_into(self.buf)
assert 'Invalid SMP packet signature' in str(excinfo.value)
def test_invalid_session_id(self):
self.sock.set_input([smp_hdr.pack(0x53, 0, 1, 0, 0, 0)])
with pytest.raises(pytds.Error) as excinfo:
self.sess.recv_into(self.buf)
assert 'Invalid SMP packet session id' in str(excinfo.value)
def test_invalid_wndw_value(self):
self.sock.set_input([smp_hdr.pack(0x53, 0, 0, 0, 0, 0)])
with pytest.raises(pytds.Error) as excinfo:
self.sess.recv_into(self.buf)
assert 'Invalid WNDW in packet from server' in str(excinfo.value)
def test_invalid_seqnum_value(self):
self.sock.set_input([smp_hdr.pack(0x53, 8, 0, 0, 500, 10)])
with pytest.raises(pytds.Error) as excinfo:
self.sess.recv_into(self.buf)
assert 'Invalid SEQNUM in packet from server' in str(excinfo.value)
def test_invalid_length(self):
self.sock.set_input([smp_hdr.pack(0x53, 8, 0, 0, 1, 10)])
with pytest.raises(pytds.Error) as excinfo:
self.sess.recv_into(self.buf)
assert 'Invalid LENGTH' in str(excinfo.value)
def test_invalid_seqnum_in_data_packet(self):
self.sock.set_input([smp_hdr.pack(0x53, 8, 0, 16, 0, 10)])
with pytest.raises(pytds.Error) as excinfo:
self.sess.recv_into(self.buf)
assert 'Invalid SEQNUM' in str(excinfo.value)
def test_invalid_seqnum_in_ack_packet(self):
self.sock.set_input([smp_hdr.pack(0x53, 2, 0, 16, 1, 10)])
with pytest.raises(pytds.Error) as excinfo:
self.sess.recv_into(self.buf)
assert 'Invalid SEQNUM' in str(excinfo.value)
def test_misc():
sock = MockSock()
mgr = SmpManager(sock)
sess = mgr.create_session()
repr(mgr)
SessionState.to_str(SessionState.SESSION_ESTABLISHED)
SessionState.to_str(SessionState.CLOSED)
SessionState.to_str(SessionState.FIN_RECEIVED)
SessionState.to_str(SessionState.FIN_SENT)
mgr = SmpManager(sock, max_sessions=5)
with pytest.raises(pytds.Error) as excinfo:
for _ in range(10):
mgr.create_session()
assert "Can't create more MARS sessions" in str(excinfo.value)
|
"""Procedure to run and collect latency data.
# INSTRUCTIONS PARALLEL PORT
# http://stefanappelhoff.com/blog/2017/11/23/Triggers-with-Psychopy-on-BrainVision-Recorder
import time
from ctypes import windll
# Opening up the driver (first call is always slower)
assert windll.inpoutx64.IsInpOutDriverOpen()
windll.inpoutx64.Out32(0x378, 1)
time.sleep(0.5)
windll.inpoutx64.Out32(0x378, 0)
# INSTRUCTIONS TRIGGER BOX
# https://www.brainproducts.com/downloads.php?kid=40
import serial
# Open the Windows device manager,
# search for the "TriggerBox VirtualSerial Port (COM6)"
# in "Ports /COM & LPT)" and enter the COM port number in the constructor.
port = serial.Serial("COM6")
# Set the port to an initial state
port.write([0x00])
# Set Bit 0, Pin 2 of the Output(to Amp) connector
port.write([0x01])
# Reset Bit 0, Pin 2 of the Output(to Amp) connector
port.write([0x00])
# Reset the port to its default state
port.write([0xFF])
# Close the serial port
port.close()
"""
import os
import serial
from warnings import warn
from psychopy import visual, event, core # noqa E402
# Make a psychopy window for the flow
# Using pyglet instead of pygame leads to an error with gammaRamp
# For proper timing, set fullscr to True
win = visual.Window(winType='pyglet', fullscr=False)
# Determine minimum time that trigger has to be sent depending on EEG sampling
# frequency. Be generous ...
fs = 5000 # sampling frequency in Hz
trig_wait = (1 / fs) * 2
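# Sanity check (illustrative): with fs = 5000 Hz this is two sample periods, i.e. 0.4 ms.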
# Parallel port address
marker_val = 1
# Serial port address
port = serial.Serial('COM4')
port.write([0x00])
# Assert we are running on the correct frame rate
fps = 144
print('Using fps: {}'.format(int(round(win.getActualFrameRate()))))
assert int(round(win.getActualFrameRate())) == fps
# Start the flow
run_loop = True
while run_loop:
for frame in range(fps):
event.waitKeys()
port.write([0x01])
core.wait(trig_wait)
port.write([0x00])
# Flip the window to inquire new key presses that were done meanwhile
core.wait(0.1)
win.flip()
# Clean up
print('\nBye.')
port.write([0xFF])
port.close()
core.wait(1)
win.close()
|
from setuptools import setup
setup(
name='tweetx',
version='0.0.1',
description='In space, everyone can hear you tweet.',
packages=['tweetx', 'tweetx.bot'],
install_requires=[
'tweepy',
'websockets'
]
)
|
# _*_ coding:utf-8 _*_
import re
from app.spider_store.common import (
get_content,
)
fake_headers_mobile = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'UTF-8,*;q=0.5',
'Accept-Encoding': 'gzip,deflate,sdch',
'Accept-Language': 'en-US,en;q=0.8',
'User-Agent': 'Mozilla/5.0 (Linux; Android 4.4.2; Nexus 4 Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.114 Mobile Safari/537.36'
}
def miaopai_download(url):
mobile_page = get_content(url, headers=fake_headers_mobile)
try:
title = re.search(r'([\'"])title\1:\s*([\'"])(.+?)\2,', mobile_page).group(3)
except:
title = re.search(r'([\'"])status_title\1:\s*([\'"])(.+?)\2,', mobile_page).group(3)
title = title.replace('\n', '_')
source = re.search(r'([\'"])screen_name\1:\s*([\'"])(.+?)\2,', mobile_page).group(3)
stream_url = re.search(r'([\'"])stream_url\1:\s*([\'"])(.+?)\2,', mobile_page).group(3)
thumbnail_urls = re.search(
r'[\'"]page_pic[\'"]:[\s\W\S\w]*[\'"]url[\'"]:\s*[\'"](.*?)[\'"],[\s\W\S\w]*},',
mobile_page
).group(1)
ext = 'mp4'
type = news_type(url)
data = {
"type": type,
"title": title,
"source": source,
"thumbnail_urls": [thumbnail_urls],
"image_urls": None,
"video_url": [stream_url],
"ext": ext,
"size": None,
}
return data
def news_type(url):
if url:
return "video"
download = miaopai_download
if __name__ == '__main__':
data = download("https://weibo.com/tv/v/FxU9HCrbu")
print(data)
|
from requests import get,put, delete
from tabulate import tabulate
import pprint
import argparse
import json
pp = pprint.PrettyPrinter(indent=4)
class RestApiClient():
def __init__(self):
self.cmds = {}
def execute_command(self, args):
        if getattr(self, args["command"], None) is not None:
# call the local method with the same name as the command arg
getattr(self, args["command"])(args)
else:
print("Command not implemented.")
def add(self, args):
vnf_src_name = self._parse_vnf_name(args.get("source"))
vnf_dst_name = self._parse_vnf_name(args.get("destination"))
params = self._create_dict(
vnf_src_interface=self._parse_vnf_interface(args.get("source")),
vnf_dst_interface=self._parse_vnf_interface(args.get("destination")),
weight=args.get("weight"),
match=args.get("match"),
bidirectional=args.get("bidirectional"),
cookie=args.get("cookie"))
response = put("%s/restapi/network/%s/%s" %
(args.get("endpoint"),
vnf_src_name,
vnf_dst_name),
json=json.dumps(params))
pp.pprint(response.json())
def remove(self, args):
vnf_src_name = self._parse_vnf_name(args.get("source"))
vnf_dst_name = self._parse_vnf_name(args.get("destination"))
params = self._create_dict(
vnf_src_interface=self._parse_vnf_interface(args.get("source")),
vnf_dst_interface=self._parse_vnf_interface(args.get("destination")),
weight=args.get("weight"),
match=args.get("match"),
bidirectional=args.get("bidirectional"),
cookie=args.get("cookie"))
response = delete("%s/restapi/network/%s/%s" %
(args.get("endpoint"),
vnf_src_name,
vnf_dst_name),
json=json.dumps(params))
pp.pprint(response.json())
def _parse_vnf_name(self, vnf_name_str):
vnf_name = vnf_name_str.split(':')[0]
return vnf_name
def _parse_vnf_interface(self, vnf_name_str):
try:
vnf_interface = vnf_name_str.split(':')[1]
except:
vnf_interface = None
return vnf_interface
def _create_dict(self, **kwargs):
return kwargs
parser = argparse.ArgumentParser(description='son-emu network')
parser.add_argument(
"command",
choices=['add', 'remove'],
help="Action to be executed.")
parser.add_argument(
"--datacenter", "-d", dest="datacenter",
help="Data center to in which the network action should be initiated")
parser.add_argument(
"--source", "-src", dest="source",
help="vnf name of the source of the chain")
parser.add_argument(
"--destination", "-dst", dest="destination",
help="vnf name of the destination of the chain")
parser.add_argument(
"--weight", "-w", dest="weight",
help="weight metric to calculate the path")
parser.add_argument(
"--match", "-m", dest="match",
help="string holding extra matches for the flow entries")
parser.add_argument(
"--bidirectional", "-b", dest="bidirectional",
action='store_true',
help="add/remove the flow entries from src to dst and back")
parser.add_argument(
"--cookie", "-c", dest="cookie",
help="cookie for this flow, as easy to use identifier (eg. per tenant/service)")
parser.add_argument(
"--endpoint", "-e", dest="endpoint",
default="http://127.0.0.1:5000",
help="UUID of the plugin to be manipulated.")
def main(argv):
args = vars(parser.parse_args(argv))
c = RestApiClient()
c.execute_command(args)
|
import math
from itertools import permutations, repeat
import numpy as np
# 500 m radius is covered by mobike request
radius = 500
# number of km per degree = ~111km
# (between 110.567km at the equator and 111.699km at the poles)
# 1km in degree = 1 / 111.32km = 0.0089
# 1m in degree = 0.0089 / 1000 = 0.0000089
coef = 2*radius * 0.0000089
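# Sanity check (illustrative): with radius = 500 m, coef = 2 * 500 * 0.0000089 = 0.0089 degrees,
# i.e. one grid step of roughly 1 km (two radii), consistent with the ~111 km per degree figure above.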
# bounding box Berlin
# start_lat = 52.341823
# start_long = 13.088209
# end_lat = 52.669724
# end_long = 13.760610
# small bounding box Berlin
end_lat = 52.548463
end_long = 13.472698
start_lat = 52.468009
start_long = 13.293994
def get_new_lat(old_lat):
return (old_lat + coef)
# pi / 180 = 0.018
def get_new_long(old_long):
return (old_long + coef / math.cos(start_lat * 0.018))
# get all lats:
first_row_lats = []
second_row_lats = []
current_lat1 = start_lat
current_lat2 = start_lat + radius * 0.0000089
while current_lat1 < end_lat:
first_row_lats.append(current_lat1)
second_row_lats.append(current_lat2)
current_lat1 = get_new_lat(current_lat1)
current_lat2 = get_new_lat(current_lat2)
# get all longs:
first_row_longs = []
second_row_longs = []
current_long1 = start_long
current_long2 = start_long + (radius * 0.0000089) / math.cos(start_lat * 0.018)
while current_long1 < end_long:
first_row_longs.append(current_long1)
second_row_longs.append(current_long2)
current_long1 = get_new_long(current_long1)
current_long2 = get_new_long(current_long2)
all_coordinates = np.array([]).reshape(0,2)
for long in first_row_longs:
coordinates = np.array(list(zip(first_row_lats, np.repeat(long, len(first_row_lats)))))
all_coordinates = np.append(all_coordinates, coordinates, axis = 0)
for long in second_row_longs:
coordinates = np.array(list(zip(second_row_lats, np.repeat(long, len(second_row_lats)))))
all_coordinates = np.append(all_coordinates, coordinates, axis = 0)
np.savetxt("coordinates.csv", all_coordinates, header= 'lat, long', delimiter=",", fmt="%10.6f")
# np.savetxt('coordinates.txt', all_coordinates, delimiter=", ", header="[", newline = "],[", footer = "]", fmt="%10.6f")
|
#!/usr/bin python
# -*- coding:utf-8 -*-
"""
Logging functionality: color logger, log-friendly timestamp...
"""
import logging
from typing import Optional
import socket
import sys
import datetime
#
import pytz
#
import coloredlogs
# ##############################################################################
# # COLOR LOGGER
# ##############################################################################
def make_timestamp(timezone="Europe/London", with_tz_output=True):
"""
Output example: day, month, year, hour, min, sec, milisecs:
10_Feb_2018_20:10:16.151
"""
ts = datetime.datetime.now(tz=pytz.timezone(timezone)).strftime(
"%Y_%m_%d_%H:%M:%S.%f")[:-3]
if with_tz_output:
return "%s(%s)" % (ts, timezone)
else:
return ts
class HostnameFilter(logging.Filter):
"""
Needed to include hostname into the logger. See::
https://stackoverflow.com/a/55584223/4511978
"""
def filter(self, record) -> bool:
record.hostname = socket.gethostname()
return True
class ColorLogger:
"""
This class:
1. Creates a ``logging.Logger`` with a convenient configuration.
2. Attaches ``coloredlogs.install`` to it for colored terminal output
3. Provides some wrapper methods for convenience
Usage example::
# create 2 loggers
cl1 = ColorLogger("term.and.file.logger", "/tmp/test.txt")
cl2 = ColorLogger("JustTermLogger")
# use them at wish
cl1.logger.debug("this is a debugging message")
cl2.logger.info("this is an informational message")
cl1.logger.warning("this is a warning message")
cl2.logger.error("this is an error message")
cl1.logger.critical("this is a critical message")
"""
FORMAT_STR = ("%(asctime)s.%(msecs)03d %(hostname)s: %(name)s" +
"[%(process)d] %(levelname)s %(message)s")
def get_logger(self, logger_name, logfile_path: Optional[str],
filemode: str = "a",
logging_level: int = logging.DEBUG) -> logging.Logger:
"""
:param filemode: In case ``logfile_path`` is given, this specifies the
output mode (e.g. 'a' for append).
:returns: a ``logging.Logger`` configured to output all events at level
``self.logging_level`` or above into ``sys.stdout`` and (optionally)
the given ``logfile_path``, if not None.
"""
# create logger, formatter and filter, and set desired logger level
logger = logging.getLogger(logger_name)
formatter = logging.Formatter(self.FORMAT_STR,
datefmt="%Y-%m-%d %H:%M:%S")
hostname_filter = HostnameFilter()
logger.setLevel(logging_level)
# create and wire stdout handler
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.addFilter(hostname_filter)
stdout_handler.setFormatter(formatter)
logger.addHandler(stdout_handler)
# optionally, create and wire file handler
if logfile_path is not None:
# create one handler for print and one for export
file_handler = logging.FileHandler(logfile_path, filemode)
file_handler.addFilter(hostname_filter)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
#
return logger
def __init__(self, logger_name: str,
logfile_path: Optional[str] = None,
filemode: str = "a",
logging_level: int = logging.DEBUG):
"""
:param logger_name: A process may have several loggers. This parameter
distinguishes them.
:param logfile_path: Where to write out.
"""
self.logger: logging.Logger = self.get_logger(logger_name,
logfile_path, filemode,
logging_level)
#
coloredlogs.install(logger=self.logger,
fmt=self.FORMAT_STR,
level=logging_level)
# a few convenience wrappers:
def debug(self, *args, **kwargs) -> None:
self.logger.debug(*args, **kwargs)
def info(self, *args, **kwargs) -> None:
self.logger.info(*args, **kwargs)
def warning(self, *args, **kwargs) -> None:
self.logger.warning(*args, **kwargs)
def error(self, *args, **kwargs) -> None:
self.logger.error(*args, **kwargs)
def critical(self, *args, **kwargs) -> None:
self.logger.critical(*args, **kwargs)
|
import os
import discord
import asyncio
import configparser
from bot.commands import Command
client = discord.Client()
@client.event
@asyncio.coroutine
def on_ready():
print('Logged in as: {0} - {1}'.format(client.user.name, client.user.id))
print('-'*20)
@client.event
@asyncio.coroutine
def on_message(message):
command = message.content.lower()
if message.author == client.user:
return
elif command == '!':
yield from client.send_message(message.channel, '<@{0}>, No command has been passed.'.format(message.author.id))
elif command.startswith('!leet'):
response = Command.leet_speak(command.replace('!leet', ''))
yield from client.send_message(message.channel, '{0}'.format(response))
# Set up the base bot
class DiscordBot(object):
def __init__(self):
self.token = None
self.config = configparser.ConfigParser()
def create_config(self):
# Ask user for bot token
# self.token = input('Bot Token:')
# Creates base config file
self.config.add_section('DiscordBot')
self.config.set('DiscordBot', 'token', os.getenv("TOKEN"))
with open('{0}\{1}'.format(os.getcwd(), 'config.ini'), 'w') as configfile:
self.config.write(configfile)
def get_token(self):
self.config.read('{0}\{1}'.format(os.getcwd(), 'config.ini'))
self.token = self.config.get('DiscordBot', 'token')
def set_token(self, token):
self.config.read('{0}\{1}'.format(os.getcwd(), 'config.ini'))
self.config.set('DiscordBot', 'token', token)
with open('{0}\{1}'.format(os.getcwd(), 'config.ini'), 'w') as configfile:
self.config.write(configfile)
def run(self):
client.run(self.token)
|
from typing import Iterable
from torch import Tensor
from torch.nn import Sequential, ModuleList
from ..neko_module import NekoModule
from ..layer import Concatenate
from ..util import ModuleFactory
class DenseBlock(NekoModule):
"""
The DenseBlock can be used to build a block with repeatable submodules with dense connections. This structure is
proposed by Huang, Liu, Van Der Maaten, & Weinberger (2017).
Args:
sub_module_layers (``List`` [``(int) -> torch.nn.Module``]):
A collection of module factory builder to build a "layer" in DenseBlock. In the DenseBlock, there will be a
submodule generated repeatedly for several times. The factory function takes an repeat_index as input, and
build a :class:`~torch.nn.Module`.
repeat (``int``): Number of repeats for each layer in DenseBlock.
Attributes:
build_sub_module (``(int) -> torch.nn.Module``): The module factory function to build a submodule in DenseBlock.
sub_modules (:class:`~torch.nn.ModuleList`): The ModuleList of all submodules.
concatenates (:class:`~torch.nn.ModuleList`): The ModuleList of all Concatenate layers in DenseBlock.
Examples::
# batch norm builder
def build_bn(i=0):
return BatchNorm2d(2 ** i * self.c)
# conv2d builder
def build_conv2d_1x1(i=0):
return Conv2d(2 ** i * self.c, 2 ** i * self.c * 4, (1, 1), build_activation=ReLU,
build_normalization=lambda: BatchNorm2d(2 ** i * self.c * 4))
# conv
def build_conv2d_3x3(i=0):
return Conv2d(2 ** i * self.c * 4, 2 ** i * self.c, (3, 3), padding=(1, 1))
dense_block = tensorneko.module.DenseBlock((
build_bn,
lambda i: ReLU(),
build_conv2d_1x1,
build_conv2d_3x3
), repeat=4)
References:
Huang, G., Liu, Z., Van Der Maaten, L., & Weinberger, K. Q. (2017). Densely connected convolutional networks.
In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 4700-4708).
"""
def __init__(self, sub_module_layers: Iterable[ModuleFactory], repeat: int = 2):
super().__init__()
def build_sub_module(i):
return Sequential(*[
build_layer(i) for build_layer in sub_module_layers
])
self.build_sub_module = build_sub_module
self.sub_modules = ModuleList([
self.build_sub_module(i) for i in range(repeat)
])
self.concatenates = ModuleList([
Concatenate(dim=1) for _ in range(repeat)
])
def forward(self, x: Tensor) -> Tensor:
xs = []
for i in range(len(self.sub_modules)):
# concat with previous output
xs.append(x)
if i != 0:
x = self.concatenates[i - 1](xs)
# forward with submodule
x = self.sub_modules[i](x)
xs.append(x)
x = self.concatenates[-1](xs)
return x
|
import os
import sys
from girder.api import access
from girder.api.describe import autoDescribeRoute, Description
from girder.api.rest import getCurrentUser, getBodyJson, RestException
from girder.constants import TokenScope
from cumulus.taskflow import load_class
from cumulus_plugin.models.cluster import Cluster as ClusterModel
from queues.models.queue import Queue as QueueModel, QueueType
from taskflow.models.taskflow import Taskflow as TaskflowModel
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Launch a taskflow.')
.param('body',
'Contains "taskFlowBody" and "taskFlowInput" for the taskflow and task',
paramType='body')
)
def launch_taskflow_endpoint():
user = getCurrentUser()
body = getBodyJson()
return launch_taskflow(user, body)
def launch_taskflow(user, body):
# Perform some validation
taskFlowBody = body.get('taskFlowBody')
if taskFlowBody is None:
raise RestException('taskFlowBody is a required key')
if 'taskFlowClass' not in taskFlowBody:
raise RestException('taskFlowClass is required in taskFlowBody')
taskflow_class = taskFlowBody['taskFlowClass']
# Check that we can load the taskflow class
try:
load_class(taskflow_class)
except Exception as ex:
msg = 'Unable to load taskflow class: %s (%s)' % \
(taskflow_class, ex)
raise RestException(msg, 400)
# Set up the taskflow input
taskFlowInput = body.get('taskFlowInput', {})
if 'cluster' not in taskFlowInput:
# Make a cluster
taskFlowInput['cluster'] = create_cluster_object(user)
if 'container' not in taskFlowInput:
taskFlowInput['container'] = 'docker'
# Load the queue
queue = fetch_or_create_queue(user)
# Create the taskflow
taskflow = TaskflowModel().create(user, taskFlowBody)
# Add it to the queue and start it
QueueModel().add(queue, taskflow, taskFlowInput, user)
QueueModel().pop(queue, limit=sys.maxsize, user=user)
return taskflow['_id']
def _nersc():
return os.environ.get('OC_SITE') == 'NERSC'
def fetch_or_create_queue(user):
# Fetch or create the queue
name = 'oc_queue'
queues = list(QueueModel().find(name=name, user=user))
if len(queues) > 0:
queue = queues[0]
else:
type = QueueType.FIFO
queue = QueueModel().create(name, type_=type, max_running=5, user=user)
return queue
def create_cluster_object(user=None):
if _nersc():
return {'name': 'cori'}
# Get the first cluster we can find
clusters = ClusterModel().find_cluster({}, user=user)
if len(clusters) > 0:
return {'_id': clusters[0]['_id']}
raise Exception('Unable to register images, no cluster configured')
|
"""
Module detecting usage of `tx.origin` in a conditional node
"""
from slither.detectors.abstract_detector import AbstractDetector, DetectorClassification
class TxOrigin(AbstractDetector):
"""
Detect usage of tx.origin in a conditional node
"""
ARGUMENT = "tx-origin"
HELP = "Dangerous usage of `tx.origin`"
IMPACT = DetectorClassification.MEDIUM
CONFIDENCE = DetectorClassification.MEDIUM
WIKI = (
"https://github.com/crytic/slither/wiki/Detector-Documentation#dangerous-usage-of-txorigin"
)
WIKI_TITLE = "Dangerous usage of `tx.origin`"
WIKI_DESCRIPTION = "`tx.origin`-based protection can be abused by a malicious contract if a legitimate user interacts with the malicious contract."
# region wiki_exploit_scenario
WIKI_EXPLOIT_SCENARIO = """
```solidity
contract TxOrigin {
address owner = msg.sender;
function bug() {
require(tx.origin == owner);
}
```
Bob is the owner of `TxOrigin`. Bob calls Eve's contract. Eve's contract calls `TxOrigin` and bypasses the `tx.origin` protection."""
# endregion wiki_exploit_scenario
WIKI_RECOMMENDATION = "Do not use `tx.origin` for authorization."
@staticmethod
def _contains_incorrect_tx_origin_use(node):
"""
Check if the node reads tx.origin and doesn't read msg.sender
Avoid the FP due to (msg.sender == tx.origin)
Returns:
(bool)
"""
solidity_var_read = node.solidity_variables_read
if solidity_var_read:
return any(v.name == "tx.origin" for v in solidity_var_read) and all(
v.name != "msg.sender" for v in solidity_var_read
)
return False
def detect_tx_origin(self, contract):
ret = []
for f in contract.functions:
nodes = f.nodes
            conditional_nodes = [
n for n in nodes if n.contains_if() or n.contains_require_or_assert()
]
bad_tx_nodes = [
                n for n in conditional_nodes if self._contains_incorrect_tx_origin_use(n)
]
if bad_tx_nodes:
ret.append((f, bad_tx_nodes))
return ret
def _detect(self):
"""Detect the functions that use tx.origin in a conditional node"""
results = []
for c in self.contracts:
values = self.detect_tx_origin(c)
for func, nodes in values:
for node in nodes:
info = [func, " uses tx.origin for authorization: ", node, "\n"]
res = self.generate_result(info)
results.append(res)
return results
|
# -*- coding: utf-8 -*-
import json
import pytest
from sanic import Sanic, Blueprint
from sanic.testing import SanicTestClient
from sanic.websocket import WebSocketProtocol
from spf import SanicPluginsFramework
import sanic_restplus
from sanic_restplus import restplus
# class TestClient(SanicTestClient):
# def get_json(self, url, status=200, **kwargs):
# response = self.get(url, **kwargs)
# assert response.status_code == status
# assert response.content_type == 'application/json'
# return json.loads(response.data.decode('utf8'))
#
# def post_json(self, url, data, status=200, **kwargs):
# response = self.post(url, data=json.dumps(data),
# headers={'content-type': 'application/json'})
# assert response.status_code == status
# assert response.content_type == 'application/json'
# return json.loads(response.data.decode('utf8'))
#
# def get_specs(self, prefix='', status=200, **kwargs):
# '''Get a Swagger specification for a RestPlus API'''
# return self.get_json('{0}/swagger.json'.format(prefix), status=status, **kwargs)
@pytest.fixture
def app():
app = Sanic(__name__)
#app.test_client_class = TestClient
spf = SanicPluginsFramework(app)
spf.register_plugin(restplus)
yield app
@pytest.yield_fixture
def api(request, app):
marker = request.node.get_closest_marker('api')
bpkwargs = {}
kwargs = {}
if marker:
if 'prefix' in marker.kwargs:
bpkwargs['url_prefix'] = marker.kwargs.pop('prefix')
if 'subdomain' in marker.kwargs:
bpkwargs['subdomain'] = marker.kwargs.pop('subdomain')
kwargs = marker.kwargs
blueprint = Blueprint('api', __name__, **bpkwargs)
api = sanic_restplus.Api(blueprint, **kwargs)
app.register_blueprint(blueprint)
yield api
@pytest.fixture
def client(loop, app, sanic_client):
return loop.run_until_complete(sanic_client(app, protocol=WebSocketProtocol))
@pytest.fixture(autouse=True)
def _push_custom_request_context(request):
app = request.getfixturevalue('app')
options = request.node.get_closest_marker('request_context')
if options is None:
return
ctx = app.test_request_context(*options.args, **options.kwargs)
ctx.push()
def teardown():
ctx.pop()
request.addfinalizer(teardown)
|
import re
import urllib.request
import json
import logging
from telegram import Update, ForceReply
from telegram.ext import Updater, CommandHandler, ConversationHandler, MessageHandler, CallbackContext, Filters
from bs4 import BeautifulSoup
default_path_bike_json_file = 'bike.json'
default_path_config_json_file = 'config.json'
default_log_file = 'log.txt'
bike_config = {}
config = {}
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO, filename=default_log_file,
filemode='a'
)
logger = logging.getLogger(__name__)
NAME, SIZE, LINK = range(3)
NAME_DELETE = range(1)
def read_json_file(path):
logger.info("Read json file %s", path)
with open(path) as f:
return json.load(f)
def write_json_file(data, path):
with open(path, 'w') as outfile:
json.dump(data, outfile)
logger.info("Write json file %s with data: %s", path, data)
def check_bikes():
bike_data = bike_config['bikes']
html_config = config['html']
for name_bike, data in bike_data.items():
webUrl = urllib.request.urlopen(data['link'])
data['availability'] = {}
data['link_status'] = str(webUrl.getcode())
data['your_size'] = False
htmlText = webUrl.read()
soup = BeautifulSoup(htmlText, 'html.parser')
for link in soup.find_all(html_config['ship_tag']['tag'], class_=html_config['ship_tag']['class']):
size = link.find(html_config['size_tag']['tag'], class_=html_config['size_tag']['class']).getText().replace(
" ", "").replace("\n", "")
if re.search(size, data['size'], re.IGNORECASE):
data['your_size'] = True
availability = link.find(html_config['availabilityMessage_tag']['tag'],
class_=html_config['availabilityMessage_tag']['class'])
if availability is None:
startShipDate = link.find(html_config['date_tag']['tag'],
class_=html_config['date_tag']['class_start_date']).getText()
endShipDate = link.find(html_config['date_tag']['tag'],
class_=html_config['date_tag']['class_end_date']).getText()
availabilityText = "Ship Date : " + startShipDate + " to " + endShipDate
else:
availabilityText = availability.getText().replace(" ", "").replace("\n", "")
data['availability'] = availabilityText
def bike_available_list_to_str():
    text = "Available Bike: \n"
    buy = False
    bike_data = bike_config['bikes']
    for name_bike, data in bike_data.items():
        if data["your_size"]:
            buy = True
            text = text + name_bike + "\n"
            text = text + data["link"] + "\n"
            text = text + data["size"] + " : " + data["availability"] + "\n"
    if buy:
        return text
    else:
        return None
def bike_list_to_str():
    if len(bike_config['bikes']) > 0:
        text = "Bike List: \n"
        bike_data = bike_config['bikes']
        for name_bike, data in bike_data.items():
            text = text + " Name: " + name_bike + "\n"
            text = text + " Link: " + data["link"] + "\n"
            text = text + " Size: " + data["size"] + "\n"
    else:
        text = "No bike in list"
    return text
def help_bot_command(update: Update, context: CallbackContext) -> None:
    text = "/list to see the list of bikes to check \n" \
           "/add to add new bike to check \n" \
           "/remove to remove a bike from the list \n"
    update.message.reply_text(text)
def check_bike_bot_callback(context: CallbackContext):
logger.info("Check Bike..")
if len(bike_config['bikes']) > 0:
check_bikes()
logger.info(bike_config['bikes'])
        strtosend = bike_available_list_to_str()
        if strtosend is not None:
            logger.info("Send bike update...")
            context.bot.send_message(chat_id=config['params']['userid'], text=strtosend)
        else:
            logger.info("No bike available")
else:
logger.info("No bike in dictionary")
def add_bike_link_bot_command(update: Update, context: CallbackContext) -> int:
if update.message.chat.id == config['params']['userid']:
logger.info("%s add new bike command", update.effective_user.full_name)
update.message.reply_text('Bike Name:')
return NAME
else:
logger.error("%s send command /add but not have permission", update.effective_user.full_name)
update.message.reply_text('Action Not permited')
return ConversationHandler.END
def bike_name_bot(update: Update, context: CallbackContext) -> int:
logger.info("%s add name bike: %s", update.effective_user.full_name, update.message.text)
bike_config['bikes'][update.message.text] = {}
context.user_data['bike_name'] = update.message.text
update.message.reply_text('Bike Size:')
return SIZE
def bike_size_bot(update: Update, context: CallbackContext) -> int:
logger.info("%s add size bike: %s", update.effective_user.full_name, update.message.text)
bike_config['bikes'][context.user_data['bike_name']]['size'] = update.message.text
update.message.reply_text('Bike Link:')
return LINK
def bike_link_bot(update: Update, context: CallbackContext) -> int:
logger.info("%s add link bike: %s", update.effective_user.full_name, update.message.text)
bike_config['bikes'][context.user_data['bike_name']]['link'] = update.message.text
    update.message.reply_text('Added')
logger.info(bike_config)
write_json_file(bike_config, default_path_bike_json_file)
return ConversationHandler.END
def bike_list_bot(update: Update, context: CallbackContext) -> None:
if update.message.chat.id == config['params']['userid']:
logger.info("%s list command", update.effective_user.full_name)
update.message.reply_text(bike_list_to_str())
else:
logger.error("%s send command /list but not have permission", update.effective_user.full_name)
update.message.reply_text('Action Not permited')
def remove_bike_bot_start(update: Update, context: CallbackContext) -> int:
if update.message.chat.id == config['params']['userid']:
logger.info("%s send /remove command ", update.effective_user.full_name)
update.message.reply_text('Bike Name:')
return NAME_DELETE
else:
logger.error("%s send command /remove but not have permission", update.effective_user.full_name)
update.message.reply_text('Action Not permited')
return ConversationHandler.END
def remove_bike_bot_end(update: Update, context: CallbackContext) -> int:
bike_name = update.message.text
logger.info("%s send command /remove with %s", update.effective_user.full_name, bike_name)
if bike_name in bike_config['bikes'].keys():
bike_config['bikes'].pop(bike_name)
write_json_file(bike_config, default_path_bike_json_file)
update.message.reply_text('Deleted')
logger.info("%s, %s bike deleted", update.effective_user.full_name, bike_name)
else:
logger.info("%s send /remove command with %s but not exist", update.effective_user.full_name, bike_name)
update.message.reply_text('Not present')
return ConversationHandler.END
def cancel(update: Update, context: CallbackContext) -> int:
user = update.message.from_user
logger.info("%s cancel", update.effective_user.full_name)
update.message.reply_text('Cancel')
return ConversationHandler.END
def main() -> None:
logger.info("Start bot")
print("Start bot")
updater = Updater(config['params']['token'])
j = updater.job_queue
job_minute = j.run_repeating(check_bike_bot_callback, interval=config['params']['time'], first=1)
dispatcher = updater.dispatcher
add_bike_con_handler = ConversationHandler(
entry_points=[CommandHandler('add', add_bike_link_bot_command)],
states={
NAME: [MessageHandler(Filters.text, bike_name_bot)],
SIZE: [MessageHandler(Filters.text, bike_size_bot)],
LINK: [MessageHandler(Filters.all, bike_link_bot)]
},
fallbacks=[CommandHandler('cancel', cancel)],
)
remove_bike_con_handler = ConversationHandler(
entry_points=[CommandHandler('remove', remove_bike_bot_start)],
states={
NAME_DELETE: [MessageHandler(Filters.text, remove_bike_bot_end)],
},
fallbacks=[CommandHandler('cancel', cancel)],
)
dispatcher.add_handler(add_bike_con_handler)
dispatcher.add_handler(remove_bike_con_handler)
dispatcher.add_handler(CommandHandler("help", help_bot_command))
dispatcher.add_handler(CommandHandler("list", bike_list_bot))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
bike_config = read_json_file(default_path_bike_json_file)
config = read_json_file(default_path_config_json_file)
main()
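# --- Hedged sketch of the expected JSON configuration (not part of the original script) ---
# The structure below is inferred from the keys accessed above; every value is a
# placeholder, not a real token, chat id, or CSS class:
#
# config.json:
#   {"params": {"token": "<telegram-bot-token>", "userid": 123456789, "time": 300},
#    "html": {"ship_tag": {"tag": "div", "class": "<class>"},
#             "size_tag": {"tag": "span", "class": "<class>"},
#             "availabilityMessage_tag": {"tag": "span", "class": "<class>"},
#             "date_tag": {"tag": "span", "class_start_date": "<class>", "class_end_date": "<class>"}}}
#
# bike.json:
#   {"bikes": {"Example Bike": {"size": "M", "link": "https://example.com/bike"}}}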
|
from typing import Any, Dict, List, Optional, Sized, Tuple
from boa3.model.builtin.method.builtinmethod import IBuiltinMethod
from boa3.model.expression import IExpression
from boa3.model.type.primitive.primitivetype import PrimitiveType
from boa3.model.type.type import IType, Type
from boa3.model.variable import Variable
from boa3.neo.vm.opcode.Opcode import Opcode
class ScriptHashMethod(IBuiltinMethod):
def __init__(self, data_type: IType = None):
if (not Type.int.is_type_of(data_type)
and not Type.str.is_type_of(data_type)
and not Type.bytes.is_type_of(data_type)):
data_type = Type.any
identifier = 'to_script_hash'
args: Dict[str, Variable] = {'self': Variable(data_type)}
super().__init__(identifier, args, return_type=Type.bytes)
def validate_parameters(self, *params: IExpression) -> bool:
if len(params) != 1:
return False
if not isinstance(params[0], IExpression):
return False
return isinstance(params[0].type, PrimitiveType)
@property
def is_supported(self) -> bool:
return self.args['self'].type is not Type.any
@property
def opcode(self) -> List[Tuple[Opcode, bytes]]:
from boa3.constants import SIZE_OF_INT160
from boa3.model.builtin.interop.stdlib.base58decodemethod import Base58DecodeMethod
from boa3.model.type.type import Type
from boa3.neo.vm.type.Integer import Integer
opcodes = [
            (Opcode.DUP, b''),  # check whether the value is empty
(Opcode.SIZE, b''),
(Opcode.JMPIFNOT, Integer(36).to_byte_array(signed=True, min_length=1)),
(Opcode.DUP, b''), # convert value to string
(Opcode.ISTYPE, Type.str.stack_item),
(Opcode.JMPIF, Integer(4).to_byte_array(min_length=1)),
(Opcode.CONVERT, Type.str.stack_item),
(Opcode.PUSH1, b''),
(Opcode.PACK, b'')
]
opcodes.extend(Base58DecodeMethod().opcode)
script_len = Integer(SIZE_OF_INT160).to_byte_array(min_length=1)
opcodes.extend([
(Opcode.DUP, b''), # if len(result) > SIZE_OF_INT160, truncates the result
(Opcode.SIZE, b''),
(Opcode.PUSHDATA1, Integer(len(script_len)).to_byte_array(min_length=1) + script_len),
(Opcode.CONVERT, Type.int.stack_item),
(Opcode.JMPGT, Integer(8).to_byte_array(min_length=1, signed=True)),
(Opcode.DUP, b''),
(Opcode.SIZE, b''), # first byte identifies address version
(Opcode.DEC, b''),
(Opcode.RIGHT, b''),
(Opcode.JMP, Integer(9).to_byte_array(min_length=1, signed=True)),
(Opcode.PUSH1, b''),
(Opcode.PUSHDATA1, Integer(len(script_len)).to_byte_array(min_length=1) + script_len),
(Opcode.CONVERT, Type.int.stack_item),
(Opcode.SUBSTR, b''),
(Opcode.CONVERT, Type.bytes.stack_item)
])
return opcodes
@property
def _args_on_stack(self) -> int:
return len(self.args)
@property
def _body(self) -> Optional[str]:
return None
def build(self, value: Any) -> IBuiltinMethod:
if 'self' in self.args and self.args['self'].type is not Type.any:
return self
from boa3.model.type.primitive.inttype import IntType
from boa3.model.type.primitive.strtype import StrType
from boa3.model.type.primitive.bytestype import BytesType
if isinstance(value, Sized) and len(value) == 1:
value = value[0]
if isinstance(value, (IntType, StrType, BytesType)):
return ScriptHashMethod(value)
elif isinstance(value, IType):
return ScriptHashMethod(Type.bytes)
return super().build(value)
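# --- Hypothetical illustration (not part of the original file) ---
# How `build` is expected to specialize the generic method for a concrete
# primitive type, using the `Type` singletons imported above:
#
#   generic = ScriptHashMethod(Type.any)   # 'self' argument typed as `any`
#   generic.is_supported                   # False, no concrete type yet
#   concrete = generic.build(Type.str)     # ScriptHashMethod specialized for str values
#   concrete.is_supported                  # True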
|
# Generated by Django 3.0.6 on 2020-05-13 17:55
import uuid
import django.contrib.gis.db.models.fields
import django.contrib.postgres.fields.ranges
import django.db.models.deletion
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="Encampment",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("created_at", models.DateTimeField(auto_now_add=True, db_index=True)),
("updated_at", models.DateTimeField(auto_now=True)),
("name", models.TextField()),
(
"canonical_location",
django.contrib.gis.db.models.fields.PointField(srid=4326),
),
],
options={"abstract": False,},
),
migrations.CreateModel(
name="Organization",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("created_at", models.DateTimeField(auto_now_add=True, db_index=True)),
("updated_at", models.DateTimeField(auto_now=True)),
("name", models.TextField()),
],
options={"abstract": False,},
),
migrations.CreateModel(
name="Report",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("created_at", models.DateTimeField(auto_now_add=True, db_index=True)),
("updated_at", models.DateTimeField(auto_now=True)),
("date", models.DateField()),
(
"recorded_location",
django.contrib.gis.db.models.fields.PointField(
null=True, srid=4326
),
),
("supplies_delivered", models.TextField(blank=True)),
("food_delivered", models.TextField(blank=True)),
(
"occupancy",
django.contrib.postgres.fields.ranges.IntegerRangeField(null=True),
),
("talked_to", models.IntegerField()),
("assessed", models.IntegerField()),
("assessed_asymptomatic", models.IntegerField()),
("needs", models.TextField(blank=True)),
("notes", models.TextField(blank=True)),
(
"encampment",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="reporting.Encampment",
),
),
(
"performed_by",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="reporting.Organization",
),
),
],
options={"abstract": False,},
),
migrations.CreateModel(
name="ScheduledVisit",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("created_at", models.DateTimeField(auto_now_add=True, db_index=True)),
("updated_at", models.DateTimeField(auto_now=True)),
("date", models.DateField()),
(
"encampment",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="reporting.Encampment",
),
),
(
"report",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="reporting.Report",
),
),
],
options={"abstract": False,},
),
]
|
"""
Random walk algorithm implementation for a mobile robot
equipped with 4 ranger sensors (front, back, left and right)
for obstacles detection
author: Ruslan Agishev (agishev_ruslan@mail.ru)
reference: https://ieeexplore.ieee.org/abstract/document/6850799/s
"""
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
import numpy as np
def plot_arrow(x, y, yaw, length=5, width=1): # pragma: no cover
plt.arrow(x, y, length * np.cos(yaw), length * np.sin(yaw),
head_length=width, head_width=width)
plt.plot(x, y)
def plot_robot(pose, params):
r = int(100*params.sensor_range_m)
plt.plot([pose[0]-r*np.cos(pose[2]), pose[0]+r*np.cos(pose[2])],
[pose[1]-r*np.sin(pose[2]), pose[1]+r*np.sin(pose[2])], '--', color='b')
plt.plot([pose[0]-r*np.cos(pose[2]+np.pi/2), pose[0]+r*np.cos(pose[2]+np.pi/2)],
[pose[1]-r*np.sin(pose[2]+np.pi/2), pose[1]+r*np.sin(pose[2]+np.pi/2)], '--', color='b')
plt.plot(pose[0], pose[1], 'ro', markersize=5)
plot_arrow(pose[0], pose[1], pose[2])
def obstacle_check(pose, params):
gmap = params.gmap
r = int(100*params.sensor_range_m)
back = [pose[0]-r*np.cos(pose[2]), pose[1]-r*np.sin(pose[2])]
front = [pose[0]+r*np.cos(pose[2]), pose[1]+r*np.sin(pose[2])]
right = [pose[0]+r*np.cos(pose[2]+np.pi/2), pose[1]+r*np.sin(pose[2]+np.pi/2)]
left = [pose[0]-r*np.cos(pose[2]+np.pi/2), pose[1]-r*np.sin(pose[2]+np.pi/2)]
pi = np.array(pose[:2], dtype=int)
backi = np.array(back, dtype=int)
fronti = np.array(front, dtype=int)
lefti = np.array(left, dtype=int)
righti = np.array(right, dtype=int)
obstacle = {
'front': 0,
'back': 0,
'right': 0,
'left': 0,
}
for i in np.arange(min(pi[0], fronti[0]), max(pi[0], fronti[0])+1):
for j in np.arange(min(pi[1], fronti[1]), max(pi[1], fronti[1])+1):
m = min(j, gmap.shape[0]-1); n = min(i, gmap.shape[1]-1)
if gmap[m,n]:
# print('FRONT collision')
obstacle['front'] = 1
for i in np.arange(min(pi[0], backi[0]), max(pi[0], backi[0])+1):
for j in np.arange(min(pi[1], backi[1]), max(pi[1], backi[1])+1):
m = min(j, gmap.shape[0]-1); n = min(i, gmap.shape[1]-1)
if gmap[m,n]:
# print('BACK collision')
obstacle['back'] = 1
for i in np.arange(min(pi[0], lefti[0]), max(pi[0], lefti[0])+1):
for j in np.arange(min(pi[1], lefti[1]), max(pi[1], lefti[1])+1):
m = min(j, gmap.shape[0]-1); n = min(i, gmap.shape[1]-1)
if gmap[m,n]:
# print('LEFT collision')
obstacle['left'] = 1
for i in np.arange(min(pi[0], righti[0]), max(pi[0], righti[0])+1):
for j in np.arange(min(pi[1], righti[1]), max(pi[1], righti[1])+1):
m = min(j, gmap.shape[0]-1); n = min(i, gmap.shape[1]-1)
if gmap[m,n]:
# print('RIGHT collision')
obstacle['right'] = 1
return obstacle
def meters2grid(pose_m, params):
# [0, 0](m) -> [100, 100]
# [1, 0](m) -> [100+100, 100]
# [0,-1](m) -> [100, 100-100]
nrows = int(params.map_width_m / params.map_resolution_m)
ncols = int(params.map_length_m / params.map_resolution_m)
if np.isscalar(pose_m):
pose_on_grid = int( pose_m/params.map_resolution_m + ncols/2 )
else:
pose_on_grid = np.array( np.array(pose_m)/params.map_resolution_m + np.array([ncols/2, nrows/2]), dtype=int )
return pose_on_grid
def grid2meters(pose_grid, params):
# [100, 100] -> [0, 0](m)
# [100+100, 100] -> [1, 0](m)
# [100, 100-100] -> [0,-1](m)
nrows = int(params.map_width_m / params.map_resolution_m)
ncols = int(params.map_length_m / params.map_resolution_m)
if np.isscalar(pose_grid):
pose_meters = (pose_grid - ncols/2) * params.map_resolution_m
else:
pose_meters = ( np.array(pose_grid) - np.array([ncols/2, nrows/2]) ) * params.map_resolution_m
return pose_meters
def left_shift(pose, r):
left = [pose[0]+r*np.cos(pose[2]+np.pi/2), pose[1]+r*np.sin(pose[2]+np.pi/2)]
return left
def right_shift(pose, r):
right = [pose[0]-r*np.cos(pose[2]+np.pi/2), pose[1]-r*np.sin(pose[2]+np.pi/2)]
return right
def back_shift(pose, r):
back = pose
back[:2] = [pose[0]-r*np.cos(pose[2]), pose[1]-r*np.sin(pose[2])]
return back
def draw_map(obstacles, params):
ax = plt.gca()
ax.set_xlim([-2.5, 2.5])
ax.set_ylim([-2.5, 2.5])
w = params.map_length_m; l = params.map_width_m; c = params.map_center
boundaries = np.array([ c+[-w/2., -l/2.], c+[-w/2., +l/2.], c+[+w/2., +l/2.], c+[+w/2., -l/2.] ])
ax.add_patch( Polygon(boundaries, linewidth=2, edgecolor='k',facecolor='none') )
for k in range(len(obstacles)):
ax.add_patch( Polygon(obstacles[k], color='k', zorder=10) )
def visualize(traj, pose, params):
plt.plot(traj[:,0], traj[:,1], 'g')
# plot_robot(pose, params)
plt.legend()
def add_obstacles_to_grid_map(obstacles, params):
""" Obstacles dicretized map """
grid = params.gmap
# rectangular obstacles
for obstacle in obstacles:
x1 = meters2grid(obstacle[0][1], params); x2 = meters2grid(obstacle[2][1], params)
y1 = meters2grid(obstacle[0][0], params); y2 = meters2grid(obstacle[2][0], params)
        if x1 > x2: x1, x2 = x2, x1
        if y1 > y2: y1, y2 = y2, y1
grid[x1:x2, y1:y2] = 1
return grid
class Params:
def __init__(self):
self.map_center = np.array([0, 0])
self.map_width_m = 2.0
self.map_length_m = 2.0
self.map_resolution_m = 0.01 # [m]
self.sensor_range_m = 0.1
self.wall_thickness_m = 1.0*self.sensor_range_m
self.simulation_time = 10 # [sec]
self.numiters = 1000
self.animate = 0
self.vel = 0.5 # [m/s]
self.create_borders_grid_map()
def create_borders_grid_map(self):
WIDTH = int(100 * (self.map_width_m))
LENGTH = int(100 * (self.map_length_m))
border = int(100 * self.wall_thickness_m)
gmap = np.zeros([WIDTH, LENGTH])
# walls
gmap[:border, :] = 1
gmap[-border:, :] = 1
gmap[:, :border] = 1
gmap[:, -border:] = 1
self.gmap = gmap
params = Params()
obstacles = [
np.array([[0.7, -0.9], [1.3, -0.9], [1.3, -0.8], [0.7, -0.8]]) + np.array([-1.0, 0.0]),
np.array([[0.7, -0.9], [1.3, -0.9], [1.3, -0.8], [0.7, -0.8]]) + np.array([-1.0, 0.5]),
np.array([[0.7, -0.9], [0.8, -0.9], [0.8, -0.3], [0.7, -0.3]]) + np.array([-1.0, 0.0]),
]
params.gmap = add_obstacles_to_grid_map(obstacles, params)
def main():
# x, y, yaw
pose = [0.3, 0.6, -np.pi/3]
traj = pose[:2]
plt.figure(figsize=(10,10))
draw_map(obstacles, params)
plt.plot(pose[0], pose[1], 'ro', markersize=20, label='Initial position')
# while True:
for _ in range(params.numiters):
dv = 0.1*params.vel
pose[0] += dv*np.cos(pose[2])
pose[1] += dv*np.sin(pose[2])
pose_grid = meters2grid(pose[:2], params)
boundary = obstacle_check([pose_grid[0], pose_grid[1], pose[2]], params)
# print(boundary)
if boundary['right'] or boundary['front']:
pose = back_shift(pose, 0.03)
pose[2] -= np.pi/2 * np.random.uniform(0.2, 0.6)
elif boundary['left']:
pose = back_shift(pose, 0.03)
pose[2] += np.pi/2 * np.random.uniform(0.2, 0.6)
traj = np.vstack([traj, pose[:2]])
if params.animate:
plt.cla()
draw_map(obstacles, params)
visualize(traj, pose, params)
plt.pause(0.1)
visualize(traj, pose, params)
plt.show()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
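# --- Worked example (not part of the original script) of the coordinate helpers ---
# With the default Params (2 m x 2 m map at 0.01 m resolution), the map origin
# lands on grid cell [100, 100], so:
#
#   p = Params()
#   meters2grid([0.0, 0.0], p)    # -> array([100, 100])
#   meters2grid([0.5, -0.5], p)   # -> array([150,  50])
#   grid2meters([150, 50], p)     # -> array([ 0.5, -0.5])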
|
# -*- coding: utf-8 -*-
# 015_cleaner.py
# WIP
import sys
sys.path.insert(0, 'D:/Projets/shade_django/apis/')
from voca import AddLog, StringFormatter, OutFileCreate, StrValidator, OdditiesFinder
##################################
# Init paths and file names
AddLog('title', 'Début du nettoyage du fichier')
work_dir = 'D:/Projets/shade_django/apis/raw/007_raw/'
# Name of the source file
raw_file = 'src'
##################################
# Build the raw list
raw_list = open(work_dir + raw_file, 'r').read().splitlines()
# There are several names per line, so they must be split into a new list
separated_names_list = []
for line in raw_list:
    line_names = line.split('\t')
    for name in line_names:
        separated_names_list.append(name)
##################################
# Keep only masculine first names
men_name_list = []
for line in separated_names_list:
    if line[-1:] in ['A', 'a']:
        pass
    else:
        men_name_list.append(line)
##################################
# Text formatting
# Init the list holding the output of StringFormatter
formatted_list = []
AddLog('subtitle', 'Début de la fonction StringFormatter')
for line in men_name_list:
    formatted_list.append(StringFormatter(line))
##################################
# Going through oddities finder
AddLog('subtitle', 'Début de la fonction OdditiesFinder')
list_without_oddities = OdditiesFinder(formatted_list)
##################################
# The mighty StrValidatoooor
AddLog('subtitle', 'Début de la fonction StrValidator')
validated_list = StrValidator(list_without_oddities)
##################################
# Save the output files
AddLog('subtitle', 'Début de la fonction OutFileCreate')
OutFileCreate('D:/Projets/shade_django/apis/out/', '015_src', validated_list, 'FirstName;homme;Pays clémentin , Ravénie , Lombrie')
|
# STUMPY
# Copyright 2019 TD Ameritrade. Released under the terms of the 3-Clause BSD license.
# STUMPY is a trademark of TD Ameritrade IP Company, Inc. All rights reserved.
from . import core, gpu_stump
from .ostinato import _ostinato, _get_central_motif
from .gpu_aamp_ostinato import gpu_aamp_ostinato
@core.non_normalized(gpu_aamp_ostinato)
def gpu_ostinato(Ts, m, device_id=0, normalize=True, p=2.0):
"""
Find the z-normalized consensus motif of multiple time series with one or more GPU
devices
This is a wrapper around the vanilla version of the ostinato algorithm
which finds the best radius and a helper function that finds the most
central conserved motif.
Parameters
----------
Ts : list
A list of time series for which to find the most central consensus motif
m : int
Window size
device_id : int or list, default 0
The (GPU) device number to use. The default value is `0`. A list of
valid device ids (int) may also be provided for parallel GPU-STUMP
computation. A list of all valid device ids can be obtained by
executing `[device.id for device in numba.cuda.list_devices()]`.
normalize : bool, default True
When set to `True`, this z-normalizes subsequences prior to computing distances.
Otherwise, this function gets re-routed to its complementary non-normalized
equivalent set in the `@core.non_normalized` function decorator.
p : float, default 2.0
The p-norm to apply for computing the Minkowski distance. This parameter is
ignored when `normalize == False`.
Returns
-------
central_radius : float
Radius of the most central consensus motif
central_Ts_idx : int
The time series index in `Ts` which contains the most central consensus motif
central_subseq_idx : int
        The subsequence index within time series `Ts[central_Ts_idx]` that contains the
        most central consensus motif
See Also
--------
stumpy.ostinato : Find the z-normalized consensus motif of multiple time series
stumpy.ostinatoed : Find the z-normalized consensus motif of multiple time series
with a distributed dask cluster
Notes
-----
`DOI: 10.1109/ICDM.2019.00140 \
<https://www.cs.ucr.edu/~eamonn/consensus_Motif_ICDM_Long_version.pdf>`__
See Table 2
The ostinato algorithm proposed in the paper finds the best radius
in `Ts`. Intuitively, the radius is the minimum distance of a
subsequence to encompass at least one nearest neighbor subsequence
from all other time series. The best radius in `Ts` is the minimum
radius amongst all radii. Some data sets might contain multiple
subsequences which have the same optimal radius.
The greedy Ostinato algorithm only finds one of them, which might
not be the most central motif. The most central motif amongst the
subsequences with the best radius is the one with the smallest mean
distance to nearest neighbors in all other time series. To find this
central motif it is necessary to search the subsequences with the
best radius via `stumpy.ostinato._get_central_motif`
Examples
--------
    >>> import stumpy
    >>> import numpy as np
    >>> from numba import cuda
    >>> if __name__ == "__main__":
    ...     all_gpu_devices = [device.id for device in cuda.list_devices()]
    ...     stumpy.gpu_ostinato(
... [np.array([584., -11., 23., 79., 1001., 0., 19.]),
... np.array([600., -10., 23., 17.]),
... np.array([ 1., 9., 6., 0.])],
... m=3,
... device_id=all_gpu_devices)
(1.2370237678153826, 0, 4)
"""
M_Ts = [None] * len(Ts)
Σ_Ts = [None] * len(Ts)
for i, T in enumerate(Ts):
Ts[i], M_Ts[i], Σ_Ts[i] = core.preprocess(T, m)
bsf_radius, bsf_Ts_idx, bsf_subseq_idx = _ostinato(
Ts, m, M_Ts, Σ_Ts, device_id=device_id, mp_func=gpu_stump
)
(
central_radius,
central_Ts_idx,
central_subseq_idx,
) = _get_central_motif(Ts, bsf_radius, bsf_Ts_idx, bsf_subseq_idx, m, M_Ts, Σ_Ts)
return central_radius, central_Ts_idx, central_subseq_idx
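# --- Hedged usage note (not part of the original module) ---
# Because of the `@core.non_normalized(gpu_aamp_ostinato)` decorator above, calling
# `gpu_ostinato(Ts, m, normalize=False)` is expected to be re-routed to the
# non-normalized `gpu_aamp_ostinato` implementation, while `normalize=True`
# (the default) runs the z-normalized code path defined in this function.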
|
# -*- coding: utf-8 -*-
from setuptools import find_packages
from setuptools import setup
version = '1.0.0'
description = 'ROOCS data operations demo library.'
long_description = 'Prototype for 34e libraries and interfaces'
requirements = [line.strip() for line in open('requirements.txt')]
dev_requirements = [line.strip() for line in open('requirements_dev.txt')]
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Atmospheric Science',
'License :: OSI Approved :: Apache Software License',
]
setup(name='daops',
version=version,
description=description,
long_description=long_description,
classifiers=classifiers,
keywords='roocs daops demo',
author='Ag Stephens',
author_email="ag.stephens@stfc.ac.uk",
python_requires='>=3.7',
url='https://github.com/roocs/proto-lib-34e',
license="Apache License v2.0",
packages=find_packages(),
include_package_data=True,
install_requires=requirements,
extras_require={
"dev": dev_requirements, # pip install ".[dev]"
},
entry_points={},
)
|
import torch
from torch import nn
from models.nets import MLP
from models import Transform
from torch.nn import functional as F
import numpy as np
from utils import sum_except_batch
import math
# Adapted from https://github.com/bayesiains/nsf/blob/master/nde/transforms/splines/rational_quadratic_test.py
DEFAULT_MIN_BIN_WIDTH = 1e-3
DEFAULT_MIN_BIN_HEIGHT = 1e-3
DEFAULT_MIN_DERIVATIVE = 1e-3
def searchsorted(bin_locations, inputs, eps=1e-6):
bin_locations[..., -1] += eps
return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1
def unconstrained_rational_quadratic_spline(inputs,
unnormalized_widths,
unnormalized_heights,
unnormalized_derivatives,
inverse=False,
tails='linear',
tail_bound=3.,
min_bin_width=DEFAULT_MIN_BIN_WIDTH,
min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
min_derivative=DEFAULT_MIN_DERIVATIVE):
inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
outside_interval_mask = ~inside_interval_mask
outputs = torch.zeros_like(inputs)
logabsdet = torch.zeros_like(inputs)
if tails == 'linear':
unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
        constant = torch.tensor(math.log(math.exp(1 - min_derivative) - 1))
unnormalized_derivatives[..., 0] = constant
unnormalized_derivatives[..., -1] = constant
outputs[outside_interval_mask] = inputs[outside_interval_mask]
logabsdet[outside_interval_mask] = 0
else:
raise RuntimeError('{} tails are not implemented.'.format(tails))
a, b = rational_quadratic_spline(
inputs=inputs[inside_interval_mask],
unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
inverse=inverse,
left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
min_bin_width=min_bin_width,
min_bin_height=min_bin_height,
min_derivative=min_derivative
)
outputs[inside_interval_mask] = a.type(outputs.dtype)
logabsdet[inside_interval_mask] = b.type(logabsdet.dtype)
return outputs, logabsdet
def rational_quadratic_spline(inputs,
unnormalized_widths,
unnormalized_heights,
unnormalized_derivatives,
inverse=False,
left=0., right=1., bottom=0., top=1.,
min_bin_width=DEFAULT_MIN_BIN_WIDTH,
min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
min_derivative=DEFAULT_MIN_DERIVATIVE):
if torch.min(inputs) < left or torch.max(inputs) > right:
        raise ValueError('Input is outside of the spline domain')
num_bins = unnormalized_widths.shape[-1]
if min_bin_width * num_bins > 1.0:
raise ValueError('Minimal bin width too large for the number of bins')
if min_bin_height * num_bins > 1.0:
raise ValueError('Minimal bin height too large for the number of bins')
widths = F.softmax(unnormalized_widths, dim=-1)
widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
cumwidths = torch.cumsum(widths, dim=-1)
cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
cumwidths = (right - left) * cumwidths + left
cumwidths[..., 0] = left
cumwidths[..., -1] = right
widths = cumwidths[..., 1:] - cumwidths[..., :-1]
derivatives = min_derivative + F.softplus(unnormalized_derivatives)
heights = F.softmax(unnormalized_heights, dim=-1)
heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
cumheights = torch.cumsum(heights, dim=-1)
cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
cumheights = (top - bottom) * cumheights + bottom
cumheights[..., 0] = bottom
cumheights[..., -1] = top
heights = cumheights[..., 1:] - cumheights[..., :-1]
if inverse:
bin_idx = searchsorted(cumheights, inputs)[..., None]
else:
bin_idx = searchsorted(cumwidths, inputs)[..., None]
input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
delta = heights / widths
input_delta = delta.gather(-1, bin_idx)[..., 0]
input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
    input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
input_heights = heights.gather(-1, bin_idx)[..., 0]
if inverse:
a = (((inputs - input_cumheights) * (input_derivatives
+ input_derivatives_plus_one
- 2 * input_delta)
+ input_heights * (input_delta - input_derivatives)))
b = (input_heights * input_derivatives
- (inputs - input_cumheights) * (input_derivatives
+ input_derivatives_plus_one
- 2 * input_delta))
c = - input_delta * (inputs - input_cumheights)
discriminant = b.pow(2) - 4 * a * c
assert (discriminant >= 0).all()
root = (2 * c) / (-b - torch.sqrt(discriminant))
outputs = root * input_bin_widths + input_cumwidths
theta_one_minus_theta = root * (1 - root)
denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
* theta_one_minus_theta)
derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
+ 2 * input_delta * theta_one_minus_theta
+ input_derivatives * (1 - root).pow(2))
logabsdet = torch.log(derivative_numerator) - \
2 * torch.log(denominator)
return outputs, -logabsdet
else:
theta = (inputs - input_cumwidths) / input_bin_widths
theta_one_minus_theta = theta * (1 - theta)
numerator = input_heights * (input_delta * theta.pow(2)
+ input_derivatives * theta_one_minus_theta)
denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
* theta_one_minus_theta)
outputs = input_cumheights + numerator / denominator
derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
+ 2 * input_delta * theta_one_minus_theta
+ input_derivatives * (1 - theta).pow(2))
logabsdet = torch.log(derivative_numerator) - \
2 * torch.log(denominator)
return outputs, logabsdet
class RationalQuadraticSplineCoupling(Transform):
def __init__(self, input_dim, hidden_dims, nonlinearity, num_bins, context_dim=0, event_dim=-1):
super().__init__()
self.event_dim = event_dim
self.input_dim = input_dim
self.split_dim = input_dim//2
self.context_dim = context_dim
self.num_bins = num_bins
out_dim = (self.num_bins*3 + 1)*self.split_dim
self.nn = MLP(self.split_dim + context_dim, hidden_dims,
out_dim, nonlinearity, residual=True)
def _output_dim_multiplier(self):
return 3 * self.num_bins + 1
def forward(self, x, context=None):
x2_size = self.input_dim - self.split_dim
x1, x2 = x.split([self.split_dim, x2_size], dim=self.event_dim)
nn_input = torch.cat(
(x1, context), dim=self.event_dim) if self.context_dim != 0 else x1
nn_out = torch.utils.checkpoint.checkpoint(
self.nn, nn_input, preserve_rng_state=False)
unnormalized_widths, unnormalized_heights, unnormalized_derivatives = nn_out.reshape(
nn_input.shape[:2]+(-1, self._output_dim_multiplier())).split([self.num_bins, self.num_bins, self.num_bins+1], dim=self.event_dim)
        # `inverse` is not passed here, so the spline runs in the forward direction (default False)
y2, ldj = torch.utils.checkpoint.checkpoint(unconstrained_rational_quadratic_spline,
x2,
unnormalized_widths,
unnormalized_heights,
unnormalized_derivatives,
preserve_rng_state=False)
ldj = sum_except_batch(ldj, num_dims=2)
y1 = x1
return torch.cat([y1, y2], dim=self.event_dim), ldj
def inverse(self, y, context=None):
y2_size = self.input_dim - self.split_dim
y1, y2 = y.split([self.split_dim, y2_size], dim=self.event_dim)
x1 = y1
nn_input = torch.cat(
(y1, context), dim=self.event_dim) if self.context_dim != 0 else y1
unnormalized_widths, unnormalized_heights, unnormalized_derivatives = self.nn(nn_input).reshape(
nn_input.shape[:2]+(-1, self._output_dim_multiplier())).split([self.num_bins, self.num_bins, self.num_bins+1], dim=self.event_dim)
x2, _ = unconstrained_rational_quadratic_spline(y2,
unnormalized_widths=unnormalized_widths,
unnormalized_heights=unnormalized_heights,
unnormalized_derivatives=unnormalized_derivatives,
inverse=True)
return torch.cat([x1, x2], dim=self.event_dim)
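# --- Hedged smoke-test sketch (not part of the original module) ---
# Round-tripping the unconstrained spline on random parameters; shapes follow the
# convention that linear-tail derivatives carry (num_bins - 1) values before padding:
#
#   B, D, K = 4, 2, 8
#   x = torch.rand(B, D) * 2.0 - 1.0      # inputs inside the [-3, 3] tail bound
#   w = torch.randn(B, D, K)              # unnormalized widths
#   h = torch.randn(B, D, K)              # unnormalized heights
#   d = torch.randn(B, D, K - 1)          # unnormalized derivatives
#   y, ldj = unconstrained_rational_quadratic_spline(x, w, h, d)
#   x_rec, _ = unconstrained_rational_quadratic_spline(y, w, h, d, inverse=True)
#   # x_rec is expected to match x up to numerical error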
|