# -*- coding: utf-8 -*-
import paramiko
import pandas as pd
import os
import datetime
import glob
import threading
import time
import boto3
from botocore.exceptions import ClientError
class sendtoserver(threading.Thread):
def __init__(self, dvr_num, dvr_ch, save_path):
threading.Thread.__init__(self)
self.dvr_num = str(dvr_num)
self.dvr_ch = str(dvr_ch)
self.server_path = save_path
def setData(self):
#self.now = datetime.datetime.now()
#self.today = self.now.strftime('%Y%m%d')
#self.work_dir = os.getcwd() + '/'
self.local_path = os.path.join('/home/qtumai/workspace/stream/save_video/', self.dvr_num, self.dvr_ch)
print('local_path : ', self.local_path)
self.file_name = self.get_filename()
self.access_key_id = "AKIAQXXRPHPXI4CQSHEX"
self.secret_key = "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"
self.bucket_name = "qtumaitest"
while True:
try:
self.response = self.create_bucket(self.bucket_name)
break
except Exception as e:
print(e)
continue
self.s3 = boto3.client("s3", aws_access_key_id = self.access_key_id, aws_secret_access_key = self.secret_key)
def get_filelist(self):
files = os.listdir(self.local_path)
condition = self.local_path + '/*.mkv'
files = glob.glob(condition)
return files
def send_sig(self, result):
return result < datetime.datetime.now()
def get_filename(self):
self.files = ''
self.file_name = ''
self.files = self.get_filelist()
for self.file_name in self.files:
self.file_name = self.file_name.split('/')[-1]
print(self.file_name)
create_time = self.file_name[:12]
year = create_time[:4]
month = create_time[4:6]
day = create_time[6:8]
hour = create_time[8:10]
_min = create_time[10:12]
print(int(year), int(month), int(day), int(hour), int(_min))
create_time = datetime.datetime(int(year), int(month), int(day), int(hour), int(_min))
result = create_time + datetime.timedelta(seconds = 1860)
print(self.send_sig(result))
if self.send_sig(result) == True:
print('select file : ', self.file_name)
break
else:
time.sleep(1)
self.files = ''
self.file_name = ''
self.files = self.get_filelist()
pass
return self.file_name
    # Channel classification
def ch_classification(self):
entrance = ['1']
etc = ['2', '3']
out = ['4']
if self.dvr_ch in entrance:
self.group = 'entrance'
elif self.dvr_ch in etc:
self.group = 'etc'
elif self.dvr_ch in out:
self.group = 'out'
else:
            print(self.dvr_ch + ' : check the channel configuration')
def delete_file(self):
os.remove(self.local_path + '/' + self.file)
def create_bucket(self, bucket_name):
print("Creating a bucket..." + bucket_name)
s3 = boto3.client("s3", aws_access_key_id = self.access_key_id, aws_secret_access_key = self.secret_key)
try:
response = s3.create_bucket(Bucket = bucket_name, CreateBucketConfiguration = {"LocationConstraint": "ap-northeast-2"})
return response
except ClientError as e:
if e.response['Error']['Code'] == "BucketAlreadyOwnedByYou":
print("Bucket already exists. skipping..")
else:
print("Unknown error, exit...")
def transfer(self):
try:
self.file = ''
self.file = self.get_filename()
self.shop_code = self.file.split('_')[1]
print('file upload : ', self.file)
print('file_path : ', self.local_path)
self.s3.upload_file(self.local_path + '/' + self.file, self.bucket_name, 'HMALL/' + self.shop_code + '/' + self.group + '/' + self.file)
print('upload finish : ', self.file)
self.delete_file()
except IndexError as e:
print(e)
time.sleep(10)
'''
def transfer(self):
self.file = ''
self.transport = paramiko.transport.Transport(host, port)
while True:
try:
self.file = self.get_filename()
print('file_name : ', self.file)
print(self.transport.getpeername())
self.transport.connect(username = self.acc, password = self.pw)
self.sftp = paramiko.SFTPClient.from_transport(self.transport)
self.sftp.put(self.local_path + self.file, self.server_path + '/' + self.file)
self.delete_file(self.file)
except Exception as e:
print('error : ', e)
pass
finally:
self.sftp.close()
self.transport.close()
break
'''
def run(self):
while True:
self.setData()
self.ch_classification()
#self.create_log(self.local_path)
self.transfer()
time.sleep(0.5)
if __name__ == '__main__':
def get_dvr_info(idx):
df = pd.read_csv('/home/qtumai/workspace/stream/config.txt')
dvr_num = df.loc[idx, 'dvr_num']
dvr_ip = df.loc[idx, 'dvr_ip']
dvr_ch = df.loc[idx, 'dvr_ch']
shop_code = df.loc[idx, 'shop_code']
return dvr_num, dvr_ch ,shop_code
host = '175.197.68.67'
port = 22
acc = 'admin'
pw = '@!Chaos123'
config = pd.read_csv('/home/qtumai/workspace/stream/config.txt')
save_path = '/homes/brooks/save_video/'
for i in config.index:
dvr_num, dvr_ch, shop_code = get_dvr_info(i)
main = sendtoserver(dvr_num, dvr_ch, save_path)
main.start()
time.sleep(0.01)
'''
dvr_num, dvr_ch, shop_code = get_dvr_info(0)
main = sendtoserver(dvr_num, dvr_ch, save_path)
main.start()
time.sleep(0.5)
'''
|
#!/usr/bin/env python
import sys
mode=0
n = int(sys.argv[1])
f = open("perftest.rail","w")
f.write("$ 'main'\n")
if mode==0:
for i in range(n):
f.write(" \\-")
f.write(n*"1o")
f.write("-\\\n")
f.write((4+n*2)*" "+ "|\n")
f.write(" /-"+n*("o1")+"-/\n")
f.write("|\n")
f.write("#")
elif mode==1:
n = 2 * n
f.write(" \\\n")
f.write(" \\\n")
for i in range(n):
if i != n:
f.write((i+3)*" "+"\\"+(3*(n-i)-3)*" ")
if i ==0:
f.write(" /-\\\n")
elif i%2==1:
f.write((i//2+1)*" / \\"+"\n")
else:
f.write(" "+"/-<"+(i//2-1)*" --<"+" --\\"+"\n")
f.write((n+2+1)*" "+"\\-<"+(n//2-1)*" --<"+" --#\n")
for i in range(n):
if i%2==0:
f.write((n+4+(i+1)*2)*" "+(n//2-i//2)*"\\ / "+"\n")
elif i!= n-1:
f.write((n+3+(i+1)*2)*" "+"\\-<"+(n//2-i//2-2)*" --<"+" --/"+"\n")
f.write(((3*n)+3)*" "+"\\-/")
f.close()
|
from blueprints import create_app
from config import config
app = create_app(config)
|
"""
Helpers to pack and unpack a unicode character into raw bytes.
"""
import sys
UNICODE_SIZE = 4
BIGENDIAN = sys.byteorder == "big"
def pack_unichar(unich, buf, pos):
pack_codepoint(ord(unich), buf, pos)
def pack_codepoint(unich, buf, pos):
if UNICODE_SIZE == 2:
if BIGENDIAN:
buf.setitem(pos, chr(unich >> 8))
buf.setitem(pos+1, chr(unich & 0xFF))
else:
buf.setitem(pos, chr(unich & 0xFF))
buf.setitem(pos+1, chr(unich >> 8))
else:
if BIGENDIAN:
buf.setitem(pos, chr(unich >> 24))
buf.setitem(pos+1, chr((unich >> 16) & 0xFF))
buf.setitem(pos+2, chr((unich >> 8) & 0xFF))
buf.setitem(pos+3, chr(unich & 0xFF))
else:
buf.setitem(pos, chr(unich & 0xFF))
buf.setitem(pos+1, chr((unich >> 8) & 0xFF))
buf.setitem(pos+2, chr((unich >> 16) & 0xFF))
buf.setitem(pos+3, chr(unich >> 24))
def unpack_codepoint(rawstring):
assert len(rawstring) == UNICODE_SIZE
if UNICODE_SIZE == 2:
if BIGENDIAN:
n = (ord(rawstring[0]) << 8 |
ord(rawstring[1]))
else:
n = (ord(rawstring[0]) |
ord(rawstring[1]) << 8)
else:
if BIGENDIAN:
n = (ord(rawstring[0]) << 24 |
ord(rawstring[1]) << 16 |
ord(rawstring[2]) << 8 |
ord(rawstring[3]))
else:
n = (ord(rawstring[0]) |
ord(rawstring[1]) << 8 |
ord(rawstring[2]) << 16 |
ord(rawstring[3]) << 24)
return n
def unpack_unichar(rawstring):
return unichr(unpack_codepoint(rawstring))
|
import numpy as np
from itertools import combinations_with_replacement
import time
from random import choice
class CouldNotClassifyClusterError(Exception):
pass
class GaussianClusterTracker(object):
def __init__(self, atoms=None, threshold=0.001,
cluster_elements=[], num_clusters=1, init_centroids=[]):
self.atoms = atoms
self._nn_dist= self._nn_distance()
self.threshold = threshold
self.cluster_id = -np.ones(len(self.atoms), dtype=np.int8)
self.probability = np.zeros(len(self.atoms))
self.gaussians = []
self.num_members = []
self.prob_belong = np.zeros((len(atoms), num_clusters))
self.cluster_elements = cluster_elements
if init_centroids:
if len(init_centroids) != num_clusters:
raise ValueError("The length of the centroids, "
"must match the number of clusters.")
for centroid in init_centroids:
self.add_gaussian(centroid)
else:
for _ in range(num_clusters):
indx = choice(self.solute_indices)
self.add_gaussian(self.atoms[indx].position)
self.frac_per_cluster = np.zeros(num_clusters) + 1.0/num_clusters
self.output_every = 20
self._check_input()
self.penalty = self._nn_distance()**2
def add_gaussian(self, mu, sigma=None):
from cemc.tools import MultivariateGaussian
if sigma is None:
sigma = np.eye(3)*self._nn_distance()**2
self.gaussians.append(MultivariateGaussian(mu=mu, sigma=sigma))
self.num_members.append(0)
def _check_input(self):
"""Perform some checks on the users input."""
if self.num_cluster_elements <= 1:
raise ValueError("There is only one cluster element present!")
@property
def num_cluster_elements(self):
return sum(1 for atom in self.atoms
if atom.symbol in self.cluster_elements)
@property
def num_clusters(self):
return len(self.gaussians)
@property
def solute_indices(self):
return [atom.index for atom in self.atoms if atom.symbol in self.cluster_elements]
@property
def num_solute_atoms(self):
return sum(1 for atom in self.atoms if atom.symbol in self.cluster_elements)
def get_cluster(self, cluster_id):
"""Return an atomic cluster."""
return np.nonzero(self.cluster_id==cluster_id)[0]
def likelihoods(self, x):
"""Compute all the likely hoods for all distribution."""
prob = []
for gauss in self.gaussians:
prob.append(gauss(x))
return prob
def recenter_gaussians(self):
"""If a gaussian has no member. Recenter it to random location."""
for i, gaussian in enumerate(self.gaussians):
if self.num_members[i] == 0:
indx = choice(self.solute_indices)
gaussian.mu = self.atoms[indx].position
def move_create_new_cluster(self, system_changes):
"""Check if the proposed move create a new cluster."""
for change in system_changes:
if change[2] in self.cluster_elements:
                prob = self.likelihoods(self.atoms[change[0]].position)
if np.max(prob) < self.threshold:
return True
return False
def update_clusters(self, system_changes):
"""Classify and update the cluster values."""
# Currently only works with one cluster
assert self.num_clusters == 1
for change in system_changes:
x = self.atoms[change[0]].position
if change[1] in self.cluster_elements and change[2] not in self.cluster_elements:
# One atom is removed
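                # Downdate the running mean and covariance incrementally
                # (using sigma = E[x x^T] - mu mu^T) rather than recomputing
                # them from all remaining cluster members.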
old_uid = self.cluster_id[change[0]]
mu = self.gaussians[old_uid].mu
sigma1 = self.gaussians[old_uid].sigma
mu2 = mu
sigma2 = sigma1
N = self.num_members[old_uid]
mu1_mu1T = np.outer(mu, mu)
mu2_mu2T = np.outer(mu2, mu2)
if N > 1:
mu2 = (N*mu - x)/(N-1)
mu2_mu2T = np.outer(mu2, mu2)
sigma2 = (N*sigma1 + N*mu1_mu1T - np.outer(x, x))/(N-1) - mu2_mu2T - np.eye(3)*self.penalty/(N-1)
self.num_members[old_uid] = N-1
self.gaussians[old_uid].mu = mu2
self.gaussians[old_uid].sigma = sigma2
                self.cluster_id[change[0]] = -1
elif change[1] not in self.cluster_elements and change[2] in self.cluster_elements:
# One atom is added to the cluster
prob = self.likelihoods(self.atoms[change[0]].position)
uid = np.argmax(prob)
self.cluster_id[change[0]] = uid
N = self.num_members[uid]
mu = self.gaussians[uid].mu
sigma1 = self.gaussians[uid].sigma
mu2 = (N*mu + x)/(N+1)
mu1_mu1T = np.outer(mu, mu)
mu2_mu2T = np.outer(mu2, mu2)
sigma2 = (N*sigma1 + N*mu1_mu1T + np.outer(x, x))/(N+1) - mu2_mu2T + np.eye(3)*self.penalty/(N+1)
self.gaussians[uid].mu = mu2
self.gaussians[uid].sigma = sigma2
self.num_members[uid] = N+1
def solute_atom_belong_to_cluster(self, system_change):
"""Return True if the new site belong to the cluster."""
for change in system_change:
if change[2] in self.cluster_elements:
prob = self.likelihoods(self.atoms[change[0]].position)
                if np.max(prob) < self.threshold:
return False
return True
def _nn_distance(self):
"""Find the nearest neighbour distance."""
indx = list(range(1, len(self.atoms)))
dists = self.atoms.get_distances(0, indx)
return np.min(dists)
def add_new_cluster(self):
# Find one atom that is not well classified
prob = np.argmin(self.probability[self.cluster_id != -1])
indx = np.argmin(np.abs(prob - self.probability))
mu = self.atoms[indx].position
sigma = np.eye(3)*self.penalty
self.add_gaussian(mu, sigma=sigma)
def find_clusters(self, max_iter=10000):
"""Find all clusters from scratch."""
        # Initially we start by assuming there
# exists a spherical cluster at the center
converged = False
step = 0
now = time.time()
prev_cost = 100000000000000.0
step = 0
print("here")
while not converged:
print(self.gaussians[0].mu)
if step > 1:
self.recenter_gaussians()
step += 1
if time.time() - now > self.output_every:
print("Cost: {:.2e}. Iteration: {}".format(prev_cost, step))
now = time.time()
# Expectation step
self._calc_belong_prob()
            # Maximization step
m_c = np.sum(self.prob_belong, axis=0)
self.frac_per_cluster = m_c/self.num_solute_atoms
self.set_mu_sigma()
cost = self.log_likelihood()
self.classify()
if abs(cost - prev_cost) < self.threshold:
print("Final log-likelihood: {:.2e}".format(cost))
return
prev_cost = cost
if step >= max_iter:
return
def set_mu_sigma(self):
"""Calculate new values for sigma and mu."""
for i, gaussian in enumerate(self.gaussians):
r = self.prob_belong[:, i]
m_c = np.sum(r)
pos = self.atoms.get_positions()
mu = pos.T.dot(r)/m_c
pos -= mu
sigma = pos.T.dot(np.diag(r)).dot(pos)/m_c
sigma += np.eye(3)*self.penalty
gaussian.mu = mu
gaussian.sigma = sigma
def log_likelihood(self):
log_likelihood = 0.0
count = 0
for atom in self.atoms:
if atom.symbol not in self.cluster_elements:
continue
likeli = np.array(self.likelihoods(atom.position))
log_likelihood += np.log(np.sum(self.frac_per_cluster*likeli))
count += 1
return log_likelihood/count
def _calc_belong_prob(self):
self.prob_belong[:, :] = 0.0
for atom in self.atoms:
if atom.symbol not in self.cluster_elements:
continue
likeli = np.array(self.likelihoods(atom.position))
r = self.frac_per_cluster*likeli/(np.sum(self.frac_per_cluster*likeli))
self.prob_belong[atom.index, :] = r
def show_clusters(self):
from ase.gui.gui import GUI
from ase.gui.images import Images
all_clusters = []
self.tag_by_probability()
for uid in range(len(self.gaussians)):
cluster = self.atoms[self.cluster_id == uid]
cluster.info = {"name": "Cluster ID: {}".format(uid)}
all_clusters.append(cluster)
images = Images()
images.initialize(all_clusters)
gui = GUI(images)
gui.show_name = True
gui.run()
def show_gaussians(self, scale=1.0, r=1.0, show=True):
"""Show clusters as ellipsoids.
:param float scale: Number of standard deviations to show
:param float r: Thickness of the tubes used to represent
the unit cell
:param bool show: If True mlab.show() is executed
"""
from mayavi.api import Engine
from mayavi import mlab
engine = Engine()
engine.start()
scene = engine.new_scene()
scene.scene.disable_render = True # for speed
surfs = []
self._draw_cell(r=r)
for gauss in self.gaussians:
surf = self._show_one_gaussian(gauss, engine, scale=scale)
surfs.append(surf)
scene.scene.disable_render = False
for i, surf in enumerate(surfs):
vtk_srcs = mlab.pipeline.get_vtk_src(surf)
vtk_src = vtk_srcs[0]
npoints = len(vtk_src.point_data.scalars)
vtk_src.point_data.scalars = np.tile(i, npoints)
if show:
mlab.show()
def _draw_cell(self, color=(0, 0, 0), r=1.0):
from mayavi import mlab
cell = self.atoms.get_cell()
for i in range(3):
x = [0, cell[i, 0]]
y = [0, cell[i, 1]]
z = [0, cell[i, 2]]
mlab.plot3d(x, y, z, color=color, tube_radius=r)
x = [cell[i, 0], cell[i, 0] + cell[(i+1)%3, 0]]
y = [cell[i, 1], cell[i, 1] + cell[(i+1)%3, 1]]
z = [cell[i, 2], cell[i, 2] + cell[(i+1)%3, 2]]
mlab.plot3d(x, y, z, color=color, tube_radius=r)
x = [cell[i, 0], cell[i, 0] + cell[(i+2)%3, 0]]
y = [cell[i, 1], cell[i, 1] + cell[(i+2)%3, 1]]
z = [cell[i, 2], cell[i, 2] + cell[(i+2)%3, 2]]
mlab.plot3d(x, y, z, color=color, tube_radius=r)
x = [cell[i, 0] + cell[(i+1)%3, 0], cell[i, 0] + cell[(i+1)%3, 0] + cell[(i+2)%3, 0]]
y = [cell[i, 1] + cell[(i+1)%3, 1], cell[i, 1] + cell[(i+1)%3, 1] + cell[(i+2)%3, 1]]
z = [cell[i, 2] + cell[(i+1)%3, 2], cell[i, 2] + cell[(i+1)%3, 2] + cell[(i+2)%3, 2]]
mlab.plot3d(x, y, z, color=color, tube_radius=r)
def _show_one_gaussian(self, gauss, engine, scale=1.0):
"""Plot one of the gaussians."""
from mayavi.sources.api import ParametricSurface
from mayavi.modules.api import Surface
source = ParametricSurface()
source.function = 'ellipsoid'
engine.add_source(source)
eigval, eigvec = np.linalg.eig(gauss.sigma)
angles = rotationMatrixToEulerAngles(eigvec.T)*180.0/np.pi
surface = Surface()
source.add_module(surface)
actor = surface.actor
actor.property.opacity = 0.5
actor.property.color = tuple(np.random.rand(3))
actor.mapper.scalar_visibility = False
actor.property.backface_culling = True
actor.actor.orientation = np.array([0.0, 0.0, 0.0])
actor.actor.origin = np.array([0.0, 0.0, 0.0])
actor.actor.position = np.array(gauss.mu)
actor.actor.scale = np.array(scale*np.sqrt(eigval))
actor.actor.rotate_x(angles[0])
actor.actor.rotate_y(angles[1])
actor.actor.rotate_z(angles[2])
return surface
def tag_by_probability(self):
"""Add the probability of belonging to this cluster
in the tag."""
# Put the weights in the momenta entry
self.atoms.arrays["initial_charges"] = self.probability
def classify(self):
"""Classify all atoms."""
self.num_members = [0 for _ in range(len(self.num_members))]
self.cluster_id[:] = -1
for atom in self.atoms:
if atom.symbol not in self.cluster_elements:
continue
#prob = self.likelihoods(atom.position)
uid = np.argmax(self.prob_belong[atom.index, :])
self.cluster_id[atom.index] = uid
self.probability[atom.index] = np.max(np.max(self.prob_belong[atom.index, :]))
self.num_members[uid] += 1
def update_gaussian_parameters(self):
"""Update the parameters of the Gaussians."""
for uid in range(len(self.gaussians)):
pos = self.atoms.get_positions()[self.cluster_id==uid, :]
if pos.shape[0] >= 2:
mu = np.mean(pos, axis=0)
sigma = np.zeros((3, 3))
mu_muT = np.outer(mu, mu)
for i in range(pos.shape[0]):
sigma += np.outer(pos[i, :], pos[i, :]) - mu_muT
sigma /= pos.shape[0]
self.gaussians[uid].mu = mu
self.gaussians[uid].sigma = sigma
def rotationMatrixToEulerAngles(R) :
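    # Convert a rotation matrix to Euler angles (x, y, z), guarding against the
    # near-gimbal-lock case where sy is close to zero.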
sy = np.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0])
singular = sy < 1e-6
if not singular :
x = np.arctan2(R[2,1] , R[2,2])
y = np.arctan2(-R[2,0], sy)
z = np.arctan2(R[1,0], R[0,0])
else :
x = np.arctan2(-R[1,2], R[1,1])
y = np.arctan2(-R[2,0], sy)
z = 0
return np.array([x, y, z])
|
import rospy
class Node():
def __init__(self, name, topics={}):
rospy.init_node(name)
self.subscribers = []
self.timed_subscriber_memory = {}
self.pubs = {}
for topic, msg_type in topics.items():
self.pubs[topic] = rospy.Publisher(
topic, msg_type
)
def spin(self):
for sub in self.subscribers:
rospy.Subscriber(
sub["topic"], sub["type"], sub["callback"]
)
rospy.spin()
def subscribe(self, topic, msg_type):
def subscribe_decorator(callback):
self.subscribers.append({
"topic": topic,
"callback": callback,
"type": msg_type
})
            return callback
        return subscribe_decorator
def subscribe_timed(self, period, topic, msg_type):
def timed_decorator(callback):
self.timed_subscriber_memory[topic] = None
self.subscribers.append({
"topic": topic,
"callback": lambda msg: self.timed_subscriber_memory.update({topic: msg}),
"type": msg_type
})
rospy.Timer(
period=rospy.Duration(period),
callback=lambda event: (
callback(self.timed_subscriber_memory[topic])
if self.timed_subscriber_memory[topic]
else False
)
)
            return callback
        return timed_decorator
def timed(self, period):
def timed_decorator(callback):
rospy.Timer(
period=rospy.Duration(period),
callback=callback
)
            return callback
        return timed_decorator
def publish(self, topic, msg):
self.pubs[topic].publish(msg)
def service_proxy(self, name, service_class):
return rospy.ServiceProxy(name, service_class)
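# A hypothetical usage sketch (not part of the original module): the topic names and
# the std_msgs String message type below are assumptions chosen for illustration.
if __name__ == "__main__":
    from std_msgs.msg import String

    node = Node("example_node", topics={"chatter_out": String})

    @node.subscribe("chatter_in", String)
    def on_chatter(msg):
        # Echo every incoming message back out in upper case.
        node.publish("chatter_out", String(data=msg.data.upper()))

    node.spin()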
|
log_path = 'logs/'
icon_server_path = './icon_server.ico'
icon_client_path = './icon_client.ico'
client_window_name = 'ChatClient'
server_window_name = 'ChatServer'
encoding = 'utf-8'
MAXSIZE = 2048
server_port = 3000
client_port = 4000
|
from __future__ import unicode_literals
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from .settings import * # NOQA
|
"""
LANGUAGE: PYTHON
AUTHOR: cdefga
GITHUB: https://github.com/cdefga
"""
import numpy as np
def n_dimensional_distance(a: np.ndarray, b: np.ndarray) -> float:
assert a.shape == b.shape, 'dimension of input must be equal'
dist = np.sqrt(np.sum((a - b)**2))
return dist
if __name__ == '__main__':
a = np.array([1,2,3,4])
b = np.array([1,2,3,5])
dist = n_dimensional_distance(a, b)
print(dist)
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .base_learner import BaseLearner
import model
import torch
import torch.nn as nn
import torch.optim as optim
class TransformerLearner(BaseLearner):
def __init__(self, optimizer, lr, model_type, vocsize, emsize, buffer_len, nhead, nhid, nlayers, dropout, learn_iterations, warmup, after_warmup):
criterion = nn.CrossEntropyLoss()
super(TransformerLearner, self).__init__(
criterion, vocsize, learn_iterations)
self.model = model.TransformerModel(
vocsize, emsize, nhead, nhid, nlayers, dropout)
self.dmodel = emsize
if lr == 42:
self.lr = self.dmodel**-0.5
else:
self.lr = lr
self.step = 1
self.warmup = warmup
self.after_warmup = after_warmup
self.buffer_len = buffer_len
self.buffer = None
kwargs = {}
if optimizer == 'Adam':
kwargs['betas'] = (0.9, 0.98)
kwargs['eps'] = 1e-9
lr = self.compute_lr()
self.optimizer = getattr(optim, optimizer)(self.model.parameters(), lr=lr)
def compute_lr(self):
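        # Inverse-square-root warmup schedule as used for Transformers: the rate
        # grows linearly for `warmup` steps, then either decays as step**-0.5 or
        # stays at its peak value, depending on `after_warmup`.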
return self.lr * min(self.step**-0.5 if self.after_warmup == 'decrease' else self.warmup**-0.5,
self.step*self.warmup**-1.5)
def learn(self, *args):
self.optimizer.param_groups[0]['lr'] = self.compute_lr()
self.step += 1
ret = super(TransformerLearner, self).learn(*args)
return ret
def predict(self, data, hidden):
self.append_to_buffer(data)
output = self.model(self.get_buffered_data())
output = output[-data.size(0):,:]
return output, hidden
def append_to_buffer(self, data):
if self.buffer is None:
self.buffer = data.detach().clone()
else:
self.buffer = torch.cat([self.buffer, data], dim=0)
self.buffer = self.buffer[-self.buffer_len:,:]
def get_buffered_data(self):
return self.buffer
def generate(self, data, hidden):
raise RuntimeError("Not implemented (because of missing buffering)")
output = self.model(data)
return output.view(-1, self.vocsize), None
def train_model(self, loss, prediction, data, targets):
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
def get_lr(self):
return self.optimizer.param_groups[0]['lr']
def get_num_parameters(self):
return sum(p.view(-1).size(0) for p in self.model.parameters())
def create_hidden_states(self, bsz):
return None
def train_mode(self):
self.model.train()
def evaluate_mode(self):
self.model.eval()
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Rtax(Package):
"""Rapid and accurate taxonomic classification of short paired-end
sequence reads from the 16S ribosomal RNA gene"""
homepage = "https://github.com/davidsoergel/rtax"
url = "http://static.davidsoergel.com/rtax-0.984.tgz"
version('0.984', sha256='92ad9a881ca1d17221794b4313654291b30df6a9edcd0453034a090ae13a3442')
depends_on('usearch')
def install(self, spec, prefix):
mkdirp(prefix.bin)
install('rtax', prefix.bin)
install_tree('scripts', prefix.bin.scripts)
install_tree('greengenes', prefix.bin.greengenes)
|
import sys
from collections import Counter
x = int(sys.stdin.readline())
xl = list(sys.stdin.readline().split())
y = int(sys.stdin.readline())
yl = list(sys.stdin.readline().split())
xl.sort()
count = Counter(xl)
for i in range(len(yl)):
if yl[i] in count:
print(count[yl[i]],end=" ")
else:
print("0",end=" ")
|
x = 6
x.meh = 5
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Generate tf records from passages/query json file for inference.
python -m language.multivec.utils.data_processor.data_processor \
--bert_hub_module_path=${HUB_DIR} \
--max_seq_length=260 \
--input_pattern=${INPUT_DIR}/passages*.json \
--output_path=${OUTPUT_DIR}/passage.tfr \
--num_threads=12 \
"""
import functools
import json
import multiprocessing
import os
from absl import app
from absl import flags
from bert import tokenization
import six
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
flags.DEFINE_string("bert_hub_module_path", None,
"Path to the BERT TF-Hub module.")
flags.DEFINE_integer("max_seq_length", 288, "Maximum block length.")
flags.DEFINE_string("input_pattern", None, "Path to input data")
flags.DEFINE_string("output_path", None, "Path to output records.")
flags.DEFINE_integer("num_threads", 12, "Number of threads.")
FLAGS = flags.FLAGS
def add_int64_feature(key, values, example):
example.features.feature[key].int64_list.value.extend(values)
class Preprocessor(object):
"""Preprocessor."""
def __init__(self, max_seq_length, tokenizer):
self._tokenizer = tokenizer
self._max_seq_length = max_seq_length
tf.logging.info("Max sequence length {}".format(self._max_seq_length))
def create_example(self, key, text):
"""Create example."""
tokens = ["[CLS]"]
tokens.extend(self._tokenizer.tokenize(text))
if len(tokens) > self._max_seq_length - 1:
tokens = tokens[:self._max_seq_length - 1]
tokens.append("[SEP]")
inputs_ids = self._tokenizer.convert_tokens_to_ids(tokens)
example = tf.train.Example()
add_int64_feature("input_ids", inputs_ids, example)
add_int64_feature("key", [int(key)], example)
return example.SerializeToString()
def example_from_json_line(self, key, text):
if not isinstance(key, six.text_type):
key = int(key.decode("utf-8"))
if not isinstance(text, six.text_type):
text = text.decode("utf-8")
return self.create_example(key, text)
def create_block_info(input_path, preprocessor):
"""Create block info."""
results = []
with tf.io.gfile.GFile(input_path) as fid:
input_file = fid.read()
data = json.loads(input_file)
for key in data:
text = data[key]
results.append(preprocessor.example_from_json_line(key, text))
return results
def get_tokenization_info(module_handle):
with tf.Graph().as_default():
bert_module = hub.Module(module_handle)
with tf.Session() as sess:
return sess.run(bert_module(signature="tokenization_info", as_dict=True))
def get_tokenizer(module_handle):
tokenization_info = get_tokenization_info(module_handle)
return tokenization.FullTokenizer(
vocab_file=tokenization_info["vocab_file"],
do_lower_case=tokenization_info["do_lower_case"])
def main(_):
pool = multiprocessing.Pool(FLAGS.num_threads)
tf.logging.info("Using hub module %s", FLAGS.bert_hub_module_path)
tokenizer = get_tokenizer(FLAGS.bert_hub_module_path)
preprocessor = Preprocessor(FLAGS.max_seq_length, tokenizer)
mapper = functools.partial(create_block_info, preprocessor=preprocessor)
block_count = 0
input_paths = tf.io.gfile.glob(FLAGS.input_pattern)
tf.logging.info("Processing %d input files.", len(input_paths))
output_dir = os.path.dirname(FLAGS.output_path)
if not tf.io.gfile.exists(output_dir):
tf.io.gfile.makedirs(output_dir)
with tf.python_io.TFRecordWriter(FLAGS.output_path) as examples_writer:
for examples in pool.imap_unordered(mapper, input_paths):
for example in examples:
examples_writer.write(example)
block_count += 1
if block_count % 10000 == 0:
tf.logging.info("Wrote %d blocks.", block_count)
tf.logging.info("Wrote %d blocks in total.", block_count)
if __name__ == "__main__":
app.run(main)
|
#!/usr/bin/env python
import re
import json
config_text="""
-s, --separator TEXT Field separator in input .csv
-q, --quoting INTEGER Control field quoting behavior per csv.QUOTE_*
constants. Use one of QUOTE_MINIMAL (0),
QUOTE_ALL (1), QUOTE_NONNUMERIC (2) or
QUOTE_NONE (3).
--skip-errors Skip lines with too many fields instead of
stopping the import
--replace-tables Replace tables if they already exist
-t, --table TEXT Table to use (instead of using CSV filename)
-c, --extract-column TEXT One or more columns to 'extract' into a
separate lookup table. If you pass a simple
column name that column will be replaced with
integer foreign key references to a new table
of that name. You can customize the name of the
table like so:
state:States:state_name
This will pull unique values from the 'state'
column and use them to populate a new 'States'
table, with an id column primary key and a
state_name column containing the strings from
the original column.
-d, --date TEXT One or more columns to parse into ISO formatted
dates
-dt, --datetime TEXT One or more columns to parse into ISO formatted
datetimes
-df, --datetime-format TEXT One or more custom date format strings to try
when parsing dates/datetimes
-pk, --primary-key TEXT One or more columns to use as the primary key
-f, --fts TEXT One or more columns to use to populate a full-
text index
-i, --index TEXT Add index on this column (or a compound index
with -i col1,col2)
--shape TEXT Custom shape for the DB table - format is
csvcol:dbcol(TYPE),...
--filename-column TEXT Add a column with this name and populate with
CSV file name
--no-index-fks Skip adding index to foreign key columns
created using --extract-column (default is to
add them)
--no-fulltext-fks Skip adding full-text index on values extracted
using --extract-column (default is to add them)
--just-strings Import all columns as text strings by default
(and, if specified, still obey --shape,
--date/datetime, and --datetime-format)
"""
def parse_cli_args():
args = []
for line in config_text.split("\n"):
if not line:
continue
if not line.startswith("-"):
args[-1]["description"] += " "
args[-1]["description"] += line.strip()
continue
# strip out the short command arg version
if not line.startswith("--"):
line = line[line.index("--"):]
        # split up the command with (optional) argument from the description
cmd_arg, description = re.split(r"\s{2,}", line)
        cmd_arg_parts = re.split(r"\s", cmd_arg)
n_parts = len(cmd_arg_parts)
if n_parts == 1:
args.append({
"command": cmd_arg_parts[0],
"args": "",
"description": description.strip()
})
elif n_parts == 2:
args.append({
"command": cmd_arg_parts[0],
"args": cmd_arg_parts[1],
"description": description.strip()
})
else:
print("Line", line)
print("cmd_arg", cmd_arg)
print("description", description)
print("cmd_arg_parts", cmd_arg_parts)
raise NotImplementedError(f"Failed parsing line with {n_parts} parts")
return args
def parsed_to_schema(parsed_args):
schema = {
"title": "CSVs-to_SQlite args",
"description": "CSVs-to-SQlite DB converter arguments",
"type": "object",
"properties": {},
}
for arg in parsed_args:
cmd = arg["command"]
        name = re.sub(r"[^a-z\s]+", " ", cmd).strip()
schema["properties"][cmd] = {
"type": 'string' if arg["args"] else "boolean",
"title": name,
"description": arg["description"],
}
return schema
def pprint(*args):
strs = []
for arg in args:
if isinstance(arg, str):
strs.append(arg)
else:
strs.append(json.dumps(arg, indent=2))
print(" ".join(strs))
if __name__ == "__main__":
parsed_args = parse_cli_args()
pprint(parsed_args)
schema = parsed_to_schema(parsed_args)
pprint(schema)
|
from .general import *
from .filtering import *
|
'''
Note: use "yield from" to suspend a coroutine and hand control back.
Analysis:
Statements normally execute one after another in order; if some code does not run in order, it means control was handed over at some point. In the example below the sequential output would be 1, 2, but because req1 yields control, the output is 2, 1.
'''
import time
from collections import deque
_delay = deque()
def sleep0():
yield
return None
def req1():
yield
b = yield from sleep0()
return 1
def req2():
yield
return 2
f1 = req1()
f1.send(None)
try:
result = f1.send(None)
except StopIteration as e:
    print(f'normal print {e.value}')
pass
else:
_delay.append((f1, 1+time.time()))
f = req2()
f.send(None)
try:
f.send(None)
except StopIteration as e:
    print(f'normal print {e.value}')
pass
for i, v in _delay:
start = v
while True:
end = time.time()
if start < end:
try:
result = i.send(None)
except StopIteration as e:
                print(f'delayed print {e.value}')
pass
break
pass
'''
Result:
normal print 2
delayed print 1
'''
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User
from django.utils.translation import gettext as _
from core import models
class UserAdmin(BaseUserAdmin):
"""Admin panel configuration for User model"""
list_display = ["username", "first_name", "last_name", "is_staff"]
fieldsets = (
(None, {"fields": ("username", "password")}),
(_("Personal info"), {"fields": ("first_name", "last_name", "email")}),
(
_("Permissions"),
{"fields": ("is_staff", "is_superuser", "is_active")},
),
(_("Important dates"), {"fields": ("last_login",)}),
)
class PostAdmin(admin.ModelAdmin):
"""Admin panel for Post model"""
list_display = ['title', 'author', 'created_at']
list_filter = ['author', 'created_at']
class CommentAdmin(admin.ModelAdmin):
"""Admin panel for Comment model"""
list_display = ['post', 'author', 'created_at']
list_filter = ['post', 'author']
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
admin.site.register(models.Post, PostAdmin)
admin.site.register(models.Comment, CommentAdmin)
|
# String variable statements with syntax errors.
string variable = 'This line should have a string variable.'
1string_variable_2 = "This line should have another string variable."
another_string_variable = 'This line has another string variable."
yet_another_string_variable = "This line has yet another string variable.'
first_string_variable_to_print = This string variable should be printed on the next line
print(first_strng_variable_to_print)
print("Here is the string variable's value again: " + first_strng_variable_to_print)
combined_strings = "Here's a case where multiple strings " + string_variable + first_string_variable_to_print " are being combined"
print(combined_strings)
|
#!/usr/bin/env python
import rospy
import time
from std_msgs.msg import String, Int32
from sensor_msgs.msg import LaserScan
from geometry_msgs.msg import Twist
import json
# pose = Pose value
# location = string value
class Avoid():
def __init__(self):
# Publisher for object detected in front
# currently publishing "True" / "False"
# @TODO: publish the angles and/or distances
self.pubHalt = rospy.Publisher('/hearts/navigation/stop', String, queue_size=10)
# todo; define these in the launch file
# for turtlebot, use cmd_vel_mux/input/teleop
self.pubVel = rospy.Publisher('/key_vel', Twist, queue_size=10)
# Distance threshold, something <= this will cause a msg to send
        self.avoidMinDistance = 0.0  # minimum range (metres) treated as a detection
        self.avoidMaxDistance = 0.3  # maximum range (metres) treated as a detection
self.start = 0 #100
self.end = 511 #411
self.turnFlag = 0
self.stopTime = 0
self.stopTimeLimit = 100
self.stopTurnLimit = 100
# Reading the laser scan msg for object detection
# for turtlebot, use laser_scan topic
rospy.Subscriber("scan", LaserScan, self.scanCallback)
self.haltFlag = False
self.timer = 0
self.loop()
def loop(self):
rate = rospy.Rate(30)
while not rospy.is_shutdown():
if(self.timer >= 2000):
if(self.haltFlag or self.turnFlag != 0):
rospy.loginfo("stop: " + str(self.stopTime))
t = Twist()
t.linear.x = -0.1
if self.stopTime >= self.stopTimeLimit:
rospy.loginfo("turn")
t.linear.x = 0.0
t.angular.z = 0.7
self.turnFlag = self.turnFlag + 1
if(self.turnFlag >= self.stopTurnLimit):
rospy.loginfo("stop turn")
self.stopTime = 0
self.turnFlag = 0
self.timer = 0
self.pubVel.publish(t)
self.stopTime = self.stopTime + 1
else:
self.stopTime = 0
else:
pass
self.timer = self.timer + 1
rate.sleep()
def scanCallback(self, data):
# Store scan info
        # notes: maybe doesn't have to do it (in the obj) every time a msg is sent
# but this is probably less intensive than checking if they changed.
self.angle_min = data.angle_min
self.angle_max = data.angle_max
#self.angle_increment = data.angle_increment
#self.time_increment = data.time_increment
self.scan_time = data.scan_time
self.range_min = data.range_min
self.range_max = data.range_max
# self.ranges = self.ranges
# Bulk of the detection here:
# tl;dr: check each laser angle reading if it's under the threshold.
# also stores r1, r2 for min and max angles that something was detected
# @TODO do something useful with this info, to provide a better behaviour
i = 0 # counter
r1 = -1 # first reading
r2 = -1 # last reading
for r in data.ranges:
#for r in range(start, end):
if i >= self.start and i <= self.end:
if r >= self.avoidMinDistance and r <= self.avoidMaxDistance:
#print "Object at: " + str(i * data.angle_increment)
#print "\tReading: " + str(r)
if r1 == -1 and r2 == -1:
r1 = r
else:
r2 = r
i = i + 1 # counter
#print "\n\n"
#print data.ranges[100]
#print data.ranges[256]
#print data.ranges[411]
# If the r1 flag is set, something was detected, so send a msg
if(r1 != -1):
#print str(r1) + ", " + str(r2)
#rospy.loginfo("stop")
self.pubHalt.publish("True")
self.haltFlag = True
else:
#rospy.loginfo("go")
self.pubHalt.publish("False")
self.haltFlag = False
if __name__ == '__main__':
rospy.init_node("avoider_controller", anonymous=True)
avoid = Avoid()
rospy.spin()
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from Models.DataBase import DatabaseAccessor
class Receivable(object):
DB = DatabaseAccessor()
def __init__(self, ID):
self.__ID = ID
self.__receivable = self.DB.get_receivable_info_by_id(self.__ID)
self.__electricCharge = self.__receivable["electricCharge"]
self.__guaranteeCharge = self.__receivable["guaranteeCharge"]
self.__propertyFeeCharge = self.__receivable["propertyFeeCharge"]
self.__waterCharge = self.__receivable["waterCharge"]
# self.__electricCharge = 0.0
# self.__guaranteeCharge = 0.0
# self.__propertyFeeCharge = 0.0
# self.__waterCharge = 0.0
@property
def electric(self):
return self.__electricCharge
@electric.setter
def electric(self, _electric):
self.__electricCharge = _electric
@property
def guarantee(self):
return self.__guaranteeCharge
@guarantee.setter
def guarantee(self, _guarantee):
self.__guaranteeCharge = _guarantee
@property
def propertyfee(self):
return self.__propertyFeeCharge
@propertyfee.setter
def propertyfee(self, _propertyfee):
self.__propertyFeeCharge = _propertyfee
@property
def water(self):
return self.__waterCharge
@water.setter
def water(self, _water):
self.__waterCharge = _water
@property
def allInfo(self):
allInfo = {"electric": self.__electricCharge,
"guarantee": self.__guaranteeCharge,
"propertyFee": self.__propertyFeeCharge,
"water":self.__waterCharge}
return allInfo
|
"""Simple API to access Billboard charts."""
from requests import get
from bs4 import BeautifulSoup
import re
"""
__author__ = Deepjyoti Barman
__github__ = github.com/deepjyoti30
"""
class song():
"""Class to store song details."""
def __init__(self):
self.title = ""
self.artist = ""
self.rank = 0
class Billboard():
"""Class to store billboard charts."""
def __init__(self, URL):
"""Initiate the basic stuff."""
self.baseurl = "https://www.billboard.com/charts/"
self.URL = self.baseurl + URL
        self.soup = self._get_soup()
self.chart = []
self.chart_name = ""
self._get_name_of_chart()
self._get_number_one()
self._get_remaining_list()
self._replace_symbols()
def _get_soup(self):
"""Return the soup for the response."""
response = get(self.URL)
soup = BeautifulSoup(response.text, 'html.parser')
return soup
def _replace_symbols(self):
"""Replace symbols like & with &"""
for i in self.chart:
i.title = re.sub(r'&', '&', i.title)
i.artist = re.sub(r'&', '&', i.artist)
def _get_name_of_chart(self):
"""Get the name of the chart from the webpage."""
name = self.soup.findAll('h1',
attrs={'class': 'chart-detail-header__chart-name'})
name = re.sub(r'\n', '', str(name))
try:
name = re.sub(
r'img alt=|"', '',
re.findall(r'img alt=".*?"', str(name))[0]
)
except IndexError:
name = re.sub(
r'[></]', '',
re.findall(r'>.*?</', str(name))[0]
)
self.chart_name = name
def _get_number_one(self):
"""The number one of the chart needs to be extracted seperately."""
number_one = song()
soup = self.soup
# Some extraction related to number one
chart_number_one_title = soup.findAll(
'div',
attrs={'class': 'chart-number-one__title'}
)[0]
number_one.title = re.sub(
r'[<>]', '',
re.findall(r'>.*?<', str(chart_number_one_title))[0]
)
chart_number_one_artist = str(soup.findAll(
'div',
attrs={'class': 'chart-number-one__artist'}
)[0])
chart_number_one_artist = chart_number_one_artist.replace("\n", '')
chart_number_one_artist = re.findall(
r'a href=.*?>.*?</a',
str(chart_number_one_artist)
)[0]
number_one.artist = re.sub(
r'[<>/]', '',
re.findall(r'>.*?</', chart_number_one_artist)[0]
)
number_one.rank = 1
self.chart.append(number_one)
def _get_remaining_list(self):
soup = self.soup.findAll('div', attrs={'class': 'chart-list-item'})
for i in soup:
songObj = song()
songObj.artist = re.sub(
r'data-artist=|["]', '',
re.findall(r'data-artist=".*?"', str(i))[0]
)
songObj.title = re.sub(
r'data-title=|["]', '',
re.findall(r'data-title=".*?"', str(i))[0]
)
songObj.rank = re.sub(
r'data-rank=|["]', '',
re.findall(r'data-rank=".*?"', str(i))[0]
)
self.chart.append(songObj)
if __name__ == "__main__":
Chart = Billboard("youtube")
for i in Chart.chart:
# print(i.title)
print("{}: {} by {}".format(i.rank, i.title, i.artist))
|
from django.test import TestCase
from lookout.report_schemas.base import ReportSchema
from lookout.report_schemas.generic import GenericReportSchema
class TestReportSchemaNotImplemented (TestCase):
""" Tests that schemas based on ``ReportSchema`` implement all of its abstract methods. """
def test_schema (self):
try:
class BadSchema (ReportSchema):
type = 'empty'
name = 'empty'
description = "empty"
BadSchema().schema
except NotImplementedError:
pass
else:
self.fail("BadSchema.schema should have raised a NotImplementedError")
def test_type (self):
try:
class BadSchema (ReportSchema):
schema = 'empty'
name = 'empty'
description = "empty"
BadSchema().type
except NotImplementedError:
pass
else:
self.fail("BadSchema.type should have raised a NotImplementedError")
def test_name (self):
try:
class BadSchema (ReportSchema):
schema = 'empty'
type = 'empty'
description = "empty"
BadSchema().name
except NotImplementedError:
pass
else:
self.fail("BadSchema.name should have raised a NotImplementedError")
def test_description (self):
try:
class BadSchema (ReportSchema):
schema = 'empty'
type = 'empty'
name = 'empty'
BadSchema().description
except NotImplementedError:
pass
else:
self.fail("BadSchema.description should have raised a NotImplementedError")
class TestGenericReportSchemaNotImplemented (TestCase):
""" Tests that schemas based on ``GenericReportSchema`` implement all of its abstract methods. """
def test_body_schema (self):
try:
class BadSchema (GenericReportSchema):
type = 'empty'
name = 'empty'
description = "empty"
BadSchema().body_schema
except NotImplementedError:
pass
else:
self.fail("BadSchema.body_schema should have raised a NotImplementedError")
|
"""
Current user endpoint: /me/*
"""
from fastapi import APIRouter, Depends, HTTPException, status
from app.api.utils.security import get_current_active_user
from app.models.user import User as UserModel
from app.schemas.msg import Msg
from app.schemas.user import User, UserUpdate, UserUpdateFull
from app.utils import (generate_email_confirmation_token,
send_confirmation_email,
verify_email_confirmation_token)
router = APIRouter()
@router.get("/me", response_model=User)
def read_user_me(
*,
current_user: UserModel = Depends(get_current_active_user),
):
"""Get current user."""
return current_user
# TODO: test
# TODO: check that you can't update your password directly
@router.put("/me", response_model=User)
def update_user_me(
*,
user_in: UserUpdate,
old_password: str = None,
current_user: UserModel = Depends(get_current_active_user),
):
"""Update myself"""
user = UserModel.find(current_user.id)
if user_in.password:
# updating a non-set password is forbidden
if not current_user.password_set:
raise HTTPException(
status_code=400,
detail="You must first define a password",
)
# updating a password requires to provide the old password
if not old_password:
raise HTTPException(
status_code=400,
detail="You must provide the old password",
)
if not user.authenticate(old_password):
raise HTTPException(
status_code=400,
detail="Old password is invalid",
)
user.update(user_in)
return user
# TODO: test actual deletion
# TODO: ask for password or code
@router.delete("/me", status_code=status.HTTP_204_NO_CONTENT)
def delete_user_me(
*,
current_user: UserModel = Depends(get_current_active_user),
) -> None:
"""Delete my account"""
current_user.delete()
return
# TODO: ask for password or code
@router.post("/me/change-email/{email}", response_model=Msg)
def change_email(
*,
email: str,
current_user: UserModel = Depends(get_current_active_user),
):
"""Send a confirmation link to the new email"""
token = generate_email_confirmation_token(email=current_user.email, new_email=email)
send_confirmation_email(email_to=email, token=token)
return {"msg": "A confirmation email has been sent."}
# TODO: test
@router.post("/validate-email/{token}", response_model=Msg)
def validate_email(
*,
token: str
):
"""Confirm a new email from a token"""
# decode the token
decoded_token = verify_email_confirmation_token(token)
if not decoded_token:
raise HTTPException(status_code=400, detail="Invalid token")
(old_email, new_email) = decoded_token
user = UserModel.where(email=old_email).first()
if user is None:
raise HTTPException(
status_code=404,
detail="User not found",
)
# updating the email
user_in = UserUpdateFull(email=new_email)
user.update(user_in)
return {"msg": "Email updated successfully"}
|
import sys
import random
if len(sys.argv) != 3:
print >> sys.stderr, "USAGE: python generate_partitions.py <nodes_file> <partitions_per_node>"
sys.exit()
FORMAT_WIDTH = 10
nodes = 0
for line in open(sys.argv[1],'r'):
nodes+=1
partitions = int(sys.argv[2])
ids = range(nodes * partitions)
# use known seed so this is repeatable
random.seed(92873498274)
random.shuffle(ids)
print '<cluster>'
print '<name>prodcluster</name>'
id = 0
for host in open(sys.argv[1],'r'):
print '<server>'
print " <id>%d</id>" % id
print " <host>%s</host>" % host.strip()
print ' <http-port>8081</http-port>'
print ' <socket-port>6666</socket-port>'
print ' <partitions>',
node_ids = sorted(ids[id*partitions:(id+1)*partitions])
for j in xrange(len(node_ids)):
print str(node_ids[j]) + ',',
if j % FORMAT_WIDTH == FORMAT_WIDTH - 1:
print ' ',
print ' </partitions>'
print '</server>'
id += 1
print '</cluster>'
|
import pandas as pd
import numpy as np
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import r2_score, mean_squared_error
from sklearn import linear_model
from sklearn.externals import joblib
import time
'''
Load data and declare parameters
Improvements
1. Load all kinds of data - csv, pickle, text extract
2. Declare parameters in a separate main.py file where the ml modules are called
3. Declare the parameters via the CLI (a rough sketch follows after this docstring)
'''
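# A rough sketch of improvement 3 above (declaring the parameters from the command
# line); assumes argparse, and the option names/defaults below are illustrative,
# not part of the original workflow.
def parse_cli_params():
    import argparse
    parser = argparse.ArgumentParser(description='Lasso grid search parameters')
    parser.add_argument('--data', default='../data/train_preprocessed.csv',
                        help='path to the preprocessed training csv')
    parser.add_argument('--feat-groups', default='RAW',
                        help='comma separated feature name prefixes')
    parser.add_argument('--dv-col', default='SalePrice', help='dependent variable column')
    parser.add_argument('--sample-col', default='sample', help='dev/val split column name')
    parser.add_argument('--scores', default='r2', help='comma separated scoring metrics')
    return parser.parse_args()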
# declaring parameters and inputs
data = pd.read_csv('../data/'+'train_preprocessed.csv')
feat_groups = 'RAW'
tuned_parameters = [{'alpha':[0.0005,0.001,0.005,0.01,0.1,0.5]}]
dv_col = 'SalePrice'
sample_col = 'sample'
scores=['r2']
'''
# model evaluation options - http://scikit-learn.org/stable/modules/model_evaluation.html#implementing-your-own-scoring-object
#scores for regression
['explained_variance','neg_mean_absolute_error','neg_mean_squared_error','neg_mean_squared_log_error','neg_median_absolute_error','r2']
# scores for classification
['accuracy','average_precision','f1','f1_micro','f1_macro','f1_weighted','f1_samples','neg_log_loss','precision','recall','roc_auc']
'''
# Decorator function for calculating time it takes for a function to run
def timeit(method):
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
print 'func:%r args:[%r, %r] took: %2.4f sec' % \
(method.__name__, args, kw, te-ts)
return result
return timed
@timeit
def lr(data,feat_groups,dv_col,sample_col,tuned_parameters,scores):
# Find the columns with prefixes as per the feat_groups
feat_groups = feat_groups.split(',')
feature_names = set()
for i in range(len(feat_groups)):
group = feat_groups[i]
group_columns = data.filter(regex='^'+group).columns
feature_names.update(group_columns)
# Break the dataset into dev, val and X and Y parts
X_dev = data[data[sample_col]=='dev'][list(feature_names)]
Y_dev = data[data[sample_col]=='dev'][dv_col]
X_val = data[data[sample_col]=='val'][list(feature_names)]
Y_val = data[data[sample_col]=='val'][dv_col]
# Run grid search for the score you want to optimise for
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
lasso = linear_model.Lasso()
clf = GridSearchCV(lasso, tuned_parameters, cv=4, scoring=score,n_jobs=4,verbose=1)
#print("The model is trained on the full development set.")
clf.fit(X_dev, Y_dev)
df_scores = pd.DataFrame(columns=['param','mean_score','std_score'])
for i, grid_params in enumerate(clf.grid_scores_):
params, mean_score, scores = grid_params.parameters, grid_params.mean_validation_score, grid_params.cv_validation_scores
print("%0.3f (+/-%0.03f) for %r"% (mean_score, scores.std() / 2, params))
# save the results in a csv file
df_scores.loc[i] = [params,mean_score,scores.std() / 2]
df_scores.to_csv('scores'+str(score)+'.csv',index=False)
print("The scores are computed on the full evaluation set.")
y_true_dev, y_pred_dev = Y_dev, clf.predict(X_dev)
y_true_val, y_pred_val = Y_val, clf.predict(X_val)
print 'dev R2 :',r2_score(y_true_dev, y_pred_dev),'val R2 :',r2_score(y_true_val, y_pred_val)
print 'dev RMSE :',mean_squared_error(y_true_dev, y_pred_dev)**0.5,'val RMSE :',mean_squared_error(y_true_val, y_pred_val)**0.5
|
# RPi Telecine - Perforation finding and detection
#
# Perforation location and frame extraction for Super 8 and
# Standard 8 film.
#
# This has been tested using Super8 amateur film with
# black film base, commercial 8mm film with a clear film base.
#
# Quite a few assumptions are made with regards to the position of
# each perforation in the frame - that they lie in the left hand
# side of the frame - Super 8 perforations are situated in the
# middle vertically, and Standard 8 perforations are towards the
# top of the frame. The film gate holds the film horizontally
# with little movement laterally.
#
# A more complex method based on the openCV squares.py example program was tried -
# and was pretty successful, but ran very slowly on the Raspberry Pi, and not 100% reliable
# so this simpler method was developed instead.
#
# Copyright (c) 2015, Jason Lane
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division
import numpy as np
import scipy.ndimage.measurements as nd
# Types of film
filmTypes = ['super8', 'std8']
class telecinePerforation():
"""
Class that handles the perforation finding
"""
filmType = ''
sizeMargin = 0.2 # Margin around ROI - 0.2=20%
windowWidth = 0 # Width of window used to detect
isInitialised = False
imageSize = ( 0,0 ) # Size of the frame to convert
ROIslice = None # Slice for the ROI where the perforation should lie
ROIxy = ( 0,0 ) # Position of ROI in image
ROIwh = ( 0,0 ) # Width and height of ROI
ROIcentrexy = [ 0,0 ] # Centre xy position of ROI in image
ROIthreshold = 0 # Threshold for sprocket detection
# Used as temporary image holder when detecting perforation
ROIimg = None
# If converting colour image, use green channel otherwise do greyscale conversion (slower)
ROIuseGreenChannel = True
# Updated when the find method is called
found = False # If last detection was successful
thresholdVal = 0.98 #
expectedSize = ( 0,0 ) # Expected size of perforation
position = (0,0)
centre = (0,0) # Centre of perforation
yDiff = 0 # Difference between real and ideal position of perforation
# Ranges of acceptable values for aspect ratio, height and width of the detected perforation
aspectRange = ( 0.0, 0.0 )
widthRange = ( 0,0 )
heightRange = ( 0,0 )
checkEdges = 0
# 1 - Use top edge of perforation as reference
# 2 - Use bottom edge only as reference
# else use centre between detected top and bottom edges as reference
checkLeftEdge = True
# Some useful information based on the mm dimensions from the film specifications
perforationAspectRatio = {'super8':(0.91/1.14), 'std8':(1.8/1.23)} # Standard sizes in mm
# Frame size in proportion to the perforation size
# Can be used to automatically set a crop based on detected perforation size in pixels
frameHeightMultiplier = { 'super8':4.23/1.143, 'std8':3.81/1.23 }
frameWidthMultiplier = { 'super8':5.46/0.91, 'std8':4.5/1.8 }
useBGR = True # Use OpenCV BGR images for grey conversion
# Utility routines
    def convert2grey(self, img):
        # Return grayscale version of the image (ITU-R BT.601 luma weights)
        if self.useBGR:
            return np.dot(img[...,:3], [0.114, 0.587, 0.299]).astype(np.uint8)
        else:
            return np.dot(img[...,:3], [0.299, 0.587, 0.114]).astype(np.uint8)
def init(self, filmType, imageSize, expectedSize, cx):
# cx is the perforation film line
# size is a (w,h) tuple of a perforation size
if imageSize[0]>imageSize[1]:
self.imageSize = (imageSize[1],imageSize[0])
self.setFilmType(filmType)
self.ROIcentrexy[0] = int(cx)
self.setPerforationSize( expectedSize )
def setFilmType(self,filmType):
if filmType in filmTypes:
# Set aspect ratio bounds
self.isInitialised = False
self.filmType = filmType
aspectRatio = self.perforationAspectRatio[filmType]
aspectMargin = aspectRatio * (self.sizeMargin/2)
self.aspectRange = ( aspectRatio-aspectMargin, aspectRatio+aspectMargin)
else:
raise Exception("Error - '{}' is an incorrect film type.".format(filmType))
def setPerforationSize(self,size):
# Sets the expected size of the perforation, and a margin for error
w,h = size
if w>0 and h>0:
w_margin = int(w*self.sizeMargin)
h_margin = int(h*self.sizeMargin)
self.widthRange = ( w-w_margin , w+w_margin )
self.heightRange = ( h-h_margin , h+h_margin )
self.expectedSize = size
self.isInitialised = True
else:
self.expectedSize = (0,0)
self.ROIimg = None
self.isInitialised = False
self.setROI()
def setROI(self):
# Sets the ROI where to look for a perforation
# If an expected perforation size is set, then ROI is based on size of perforation
img_h,img_w = self.imageSize
if self.isInitialised:
# Already know expected size, so use smaller ROI
# ROI height and position on Y axis
# Top of ROI for initialised perforation detection
h = int(img_h/2) # Use 1/2 of image height for ROI
if self.filmType == 'super8':
# Middle of image height
y = int(img_h/4)
else:
# Standard 8 - top part of image
y = int(img_h/50) # 39 pixels with 1944px high image
            # Base width on previously detected perforation - centred on ROIcentrexy[0]
w = int((self.expectedSize[0] + (self.expectedSize[0]*self.sizeMargin))/2)
roiL = max(0, self.ROIcentrexy[0]-w)
roiR = min(img_w, self.ROIcentrexy[0]+w)
self.ROIcentrexy = [ int(roiL+(roiR-roiL)/2), int(y+(h/2)) ]
else:
# Not found before - so use larger area for detection
# Use whole image height + half image width
y = 0
h = img_h
roiL = 0
roiR = int(img_w/2)
self.ROIcentrexy = [0,0]
self.ROIxy = ( roiL, y )
self.ROIwh = ( roiR-roiL, h )
self.ROIslice = np.index_exp[ y:y+h, roiL:roiR ] # Create the slice object for making the ROI
self.ROIimg = np.zeros( (roiR-roiL, h), dtype=np.uint8) # Initialise space for the ROI image
def setROIimg(self,img):
# Sets the ROI image - converting to greyscale if necessary
if img.shape[:2] == self.imageSize:
# Expected image size OK
if len(img.shape)>2:
# Colour image, so convert it
if self.ROIuseGreenChannel:
i = img[self.ROIslice]
self.ROIimg = i[:,:,1]
else:
# do 'proper' greyscale conversion
self.ROIimg = self.convert2grey(img[self.ROIslice])
else:
# greyscale image already
self.ROIimg = img[self.ROIslice]
else:
# We have an incorrect image size - this shouldn't happen
raise Exception('Image size incorrect. Expected: {} Received: {}'.format(self.imageSize,img.shape[:2]) )
    def cropToSlice(self, rect):
        # Returns a numpy slice from a list or tuple (x, y, w, h) for extracting a crop from the image
        x, y, w, h = rect
        x = max(x,0)
        y = max(y,0)
        w = max(w,1)
        h = max(h,1)
        return np.index_exp[ y:y+h, x:x+w ]
def findFirstFromCoords( self, img, startPosition, windowWidth ):
# Find first perforation and its size from the starting position
self.isInitialised = False
self.found = False
self.imageSize = img.shape[:2]
self.setROI()
self.setROIimg(img)
xStart = startPosition[0]
yStart = startPosition[1]
win = windowWidth//2
#take a vertical section of pixels from the ROI and threshold it
vROI = self.ROIimg[:,xStart-win:xStart+win]
threshVal = int(vROI.max()*self.thresholdVal)
#Make a single pixel wide strip, with the median of all the rows - and threshold it
vROI = np.median(vROI,axis=1) < threshVal
# And horizontal...
hROI = self.ROIimg[yStart-win:yStart+win,:]
#Make a single pixel wide strip, with the median of all the columns - and threshold it
hROI = np.median(hROI,axis=0) < threshVal
# Check if centre section is clear of data
if hROI[xStart-win:xStart+win].any() or vROI[yStart-win:yStart+win].any():
print( "Image data, so can't locate perforation at: {}".format(startPosition) )
else:
x,y = self.ROIxy
w,h = self.ROIwh
# Now to find the edges
bot = vROI[yStart:].argmax()
bot = yStart+bot if bot>0 else h
vROI = vROI[:yStart]
top = vROI[::-1].argmax()
top = yStart-top if top>0 else 0
right = hROI[xStart:].argmax()
right = xStart+right if right>0 else w
hROI = hROI[:xStart]
left = hROI[::-1].argmax()
left = xStart-left if left>0 else 0
# Sanity check the aspect ratio of detection
w = right-left
h = bot-top
aspect = float(w) / float(h)
if self.aspectRange[0] <= aspect <= self.aspectRange[1]:
# Aspect Ratio of found perforation is OK - save information
self.setPerforationSize( (w,h) )
self.setPerfPosition( x+left+((right-left)/2), y+top+(h/2) )
self.windowWidth = w - (w*self.sizeMargin*2)
self.isInitialised = True
# Now adjust ROI to match found perforation
self.ROIcentrexy[0] = self.centre[0]
self.setROI()
self.found = True
else:
                print( "Perforation aspect ratio {} NOT OK - detection failed. Range: {}".format(aspect,self.aspectRange) )
def setPerfPosition(self,cx,cy):
# Sets the perforation position based on the centre
self.centre = ( int(cx), int(cy) )
self.position = ( int(cx-self.expectedSize[0]/2),int(cy-self.expectedSize[1]/2) )
self.yDiff = int(self.centre[1]-self.ROIcentrexy[1])
def findVertical(self, img):
# Used for subsequent captures where we know the expected size and
# approximate horizontal position of perforation
self.found = False
self.setROIimg(img)
expectedW, expectedH = self.expectedSize
xStart = self.ROIwh[0]//2
#xStart = self.centre[0]-ROIxy[0]
yStart = self.ROIcentrexy[1]-self.ROIxy[1]
        win = int(expectedW - (expectedW*self.sizeMargin))//2  # keep integer so it can be used as a slice index
vROI = self.ROIimg[:,xStart-win:xStart+win]
threshVal = int(vROI.max() * self.thresholdVal)
vROI = np.median(vROI,axis=1) < threshVal
#print "FindVertical: vROI"
#print "shape: {}".format(vROI.shape)
x,y = self.ROIxy
w,h = self.ROIwh
# Now to find the edges
bot = vROI[yStart:].argmax()
#print("bot:{}".format(bot))
#print vROI[yStart:]
bot = yStart+bot if bot>0 else h
vROI = vROI[:yStart]
top = vROI[::-1].argmax()
#print("top:{}".format(top))
#print vROI[::-1]
top = yStart-top if top>0 else 0
if self.checkEdges==1:
# use top edge as reference and extrapolate bottom edge
bot = top+expectedH
elif self.checkEdges==2:
# use bottom edge as reference
top = bot-expectedH
# Check if detected is close to correct aspect ratio of perforation
aspect = float(expectedW) / float(bot-top)
if self.aspectRange[0] <= aspect <= self.aspectRange[1]:
# Aspect Ratio of found perforation is OK - save information
#print( "Aspect ratio OK" )
x,y = self.ROIxy
self.setPerfPosition( x + xStart, y + top + ((bot-top)/2) )
self.found = True
else:
            print( "Perforation aspect ratio {} NOT OK - detection failed. Range: {}".format(aspect,self.aspectRange) )
if not(self.found):
# Try alternative method
self.findVerticalAlternative()
def findVerticalAlternative(self):
# This is an alternative method, a bit more expensive
# than the first version, and is called on failure of
        # the previous findVertical. It uses Scipy labelling to segment a strip
# of data from the ROI
self.found = False
cx = self.ROIwh[0]//2
expectedW, expectedH = self.expectedSize
        win = int(expectedW - (expectedW*self.sizeMargin))//2  # integer half-width for slicing
#take a vertical section of pixels from the ROI and threshold it
vROI = self.ROIimg[:,cx-win:cx+win]
#Make a single pixel wide strip, with the median of all the rows
vROI = np.median(vROI,axis=1)
threshVal = int(vROI.max() * self.thresholdVal)
vROIthres = vROI >= threshVal
candidate = None
if vROIthres.min() != vROIthres.max():
# Prevent a divide by zero because roi is all the same value.
# e.g. we have a frame completely white or black
lbl,numLbl = nd.label(vROIthres)
obj = nd.find_objects(lbl)
brightest = 0
for s in obj:
                print(s)
# s is an np.slice object
sBright = np.mean(vROI[s])
sHeight = s[0].stop - s[0].start
if (self.heightRange[0] <= sHeight <= self.heightRange[1]) and sBright > brightest:
candidate = s[0]
brightest = sBright
if candidate:
self.setPerfPosition( self.ROIcentrexy[0], self.ROIxy[1]+candidate.start + ((candidate.stop-candidate.start)/2 ))
self.found = True
def findLeftEdge(self):
# Find the left edge of the perforation.
# This can be used to compensate for any horizontal
# movement of the film in the frame - this should be called
# after finding the vertical position. The left edge is used
# as the right may be overwhelmed with a bright image.
# It uses the same ROI image created in findVertical
if self.found:
# Horizontal section, and threshold
expectedW, expectedH = self.expectedSize
            win = int(expectedH - (expectedH*self.sizeMargin))//2  # integer half-height for slicing
#Centre of current perforation
centre = (self.centre[0]-self.ROIxy[0], self.centre[1]-self.ROIxy[1] )
# Horizontal strip of pixels of ROI up to centre of perforation
hROI = self.ROIimg[ centre[1]-win:centre[1]+win, :centre[0] ]
threshVal = int(hROI.max() * self.thresholdVal)
#Make a single pixel wide strip, with the median of all the columns - and threshold it
hROI = np.median(hROI, axis=0) < threshVal
# Position of edge of perforation
left = hROI[::-1].argmax()
left = centre[0]-left if left>0 else 0
self.position = ( left + self.ROIxy[0], self.position[1] )
self.centre = (left + (self.expectedSize[0]//2) + self.ROIxy[0], self.centre[1] )
else:
raise Exception('Error - Cannot do findLeftEdge until vertical has been found')
def find(self,img):
# Find perforation position in the image
if self.isInitialised:
self.findVertical(img)
if self.found and self.checkLeftEdge:
self.findLeftEdge()
else:
# We haven't initialised or run findFirstFromCoords
raise Exception('Error - Perforation detection not initialised.')
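    # --- Illustrative usage sketch (added note, not part of the original file) ---
    # The methods above belong to a perforation-detector class whose name is not visible
    # in this fragment; `PerforationDetector` below is assumed purely for illustration.
    # Typical flow: locate the first perforation from a manually supplied start point,
    # then track it frame-by-frame with find().
    #
    #   detector = PerforationDetector()
    #   detector.init('super8', imageSize=(1080, 1920), expectedSize=(0, 0), cx=200)
    #   detector.findFirstFromCoords(first_frame, startPosition=(200, 500), windowWidth=40)
    #   if detector.found:
    #       for frame in more_frames:
    #           detector.find(frame)   # updates detector.centre / detector.yDiff for frame alignment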
|
import torch
import torch.nn as nn
from modules.rnn import LSTM
from modules.dropout import *
from modules.crf import CRF
from torch.nn.utils.rnn import pad_sequence
import torch.nn.functional as F
from modules.BertModel import BertEmbedding
class BertSeqTagger(nn.Module):
def __init__(self, bert_embed_dim, hidden_size, num_rnn_layer,
num_tag, num_bert_layer=8,
dropout=0.5, bert_model_path=None, use_mixup=True):
super(BertSeqTagger, self).__init__()
self.bert_embed_dim = bert_embed_dim
self.num_tag = num_tag
self.dropout = dropout
self.use_mixup = use_mixup
self.bert = BertEmbedding(bert_model_path,
num_bert_layer,
proj_dim=self.bert_embed_dim,
use_proj=False)
hidden_size = self.bert_embed_dim // 2
self.seq_encoder = LSTM(input_size=self.bert_embed_dim,
hidden_size=hidden_size,
num_layers=num_rnn_layer,
dropout=dropout)
self.hidden2tag = nn.Linear(2*hidden_size, num_tag)
self.tag_crf = CRF(num_tags=num_tag, batch_first=True)
def bert_params(self):
return self.bert.bert.parameters()
def bert_named_params(self):
return self.bert.bert.named_parameters()
def base_named_params(self):
bert_param_names = []
for name, param in self.bert.bert.named_parameters():
bert_param_names.append(id(param))
other_params = []
for name, param in self.named_parameters():
if param.requires_grad and id(param) not in bert_param_names:
other_params.append((name, param))
return other_params
def base_params(self):
bert_param_names = []
for name, param in self.bert.bert.named_parameters():
if param.requires_grad:
bert_param_names.append(id(param))
other_params = []
for name, param in self.named_parameters():
if param.requires_grad and id(param) not in bert_param_names:
other_params.append(param)
return other_params
def forward(self, bert_inp1, mask1, bert_inp2=None, mask2=None, mix_lmbd=None):
'''
:param bert_inp / bert_inp2: bert_ids, segments, bert_masks, bert_lens
:param mask1 / mask2: (bs, seq_len) 0 for padding
:return:
'''
bert_repr = self.bert(*bert_inp1)
bert_repr = F.dropout(bert_repr, p=self.dropout, training=self.training)
if bert_inp2 is not None and mix_lmbd is not None:
bert_repr2 = self.bert(*bert_inp2)
bert_repr2 = F.dropout(bert_repr2, p=self.dropout, training=self.training)
len1 = bert_repr.size(1)
len2 = bert_repr2.size(1)
if len2 > len1:
tmp_repr = torch.zeros_like(bert_repr2)
# tmp_mask = mask1.new_zeros(bert_repr2.shape[:2])
tmp_mask = torch.zeros_like(mask2)
tmp_repr[:, :len1, :] = bert_repr
tmp_mask[:, :len1] = mask1
bert_repr = tmp_repr
mask1 = tmp_mask
elif len2 < len1:
tmp_repr = torch.zeros_like(bert_repr)
# tmp_mask = mask2.new_zeros(bert_repr.shape[:2])
tmp_mask = torch.zeros_like(mask1)
tmp_repr[:, :len2, :] = bert_repr2
tmp_mask[:, :len2] = mask2
bert_repr2 = tmp_repr
mask2 = tmp_mask
mask = torch.max(mask1, mask2)
bert_repr = bert_repr * mix_lmbd.unsqueeze(1) + bert_repr2 * (1 - mix_lmbd).unsqueeze(1)
else:
mask = mask1
enc_out = self.seq_encoder(bert_repr, non_pad_mask=mask.cpu())[0]
tag_score = self.hidden2tag(enc_out)
return tag_score
def tag_loss(self, tag_score, gold_tags, mask=None, mixup_ws=None, alg='crf', reduction='mean'):
'''
:param tag_score: (b, t, nb_cls)
:param gold_tags: (b, t)
        :param mask: (b, t) 1 for valid positions, 0 for padding
:param alg: 'greedy' and 'crf'
:return:
'''
assert alg in ['greedy', 'crf']
if alg == 'crf':
lld = self.tag_crf(tag_score, tags=gold_tags, mask=mask, mixup_ws=mixup_ws, reduction=reduction)
return lld.neg()
else:
sum_loss = F.cross_entropy(tag_score.transpose(1, 2), gold_tags, ignore_index=0, reduction='sum')
return sum_loss / mask.size(0)
def tag_decode(self, tag_score, mask=None, alg='crf'):
'''
:param tag_score: (b, t, nb_cls) emission probs
        :param mask: (b, t) 1 for valid positions, 0 for padding
:param alg:
:return:
'''
assert alg in ['greedy', 'crf']
if alg == 'crf':
best_tag_seq = self.tag_crf.decode(tag_score, mask=mask)
# return best segment tags
return pad_sequence(best_tag_seq, batch_first=True, padding_value=0)
else:
return tag_score.data.argmax(dim=-1)
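
# --- Standalone sketch (added for illustration, not part of the original model) ---
# Mirrors the padding + mixup interpolation done in forward() above: two batches of
# encoder outputs with different sequence lengths are zero-padded to a common length,
# their padding masks merged, and the representations mixed by a per-sample lambda.
def _mixup_padding_demo():
    bsz, dim, len1, len2 = 2, 4, 5, 7
    repr1, mask1 = torch.randn(bsz, len1, dim), torch.ones(bsz, len1, dtype=torch.long)
    repr2, mask2 = torch.randn(bsz, len2, dim), torch.ones(bsz, len2, dtype=torch.long)
    mix_lmbd = torch.full((bsz, 1), 0.7)        # one mixing coefficient per sample
    if len2 > len1:                             # pad the shorter batch with zeros
        padded = torch.zeros_like(repr2)
        padded[:, :len1] = repr1
        padded_mask = torch.zeros_like(mask2)
        padded_mask[:, :len1] = mask1
        repr1, mask1 = padded, padded_mask
    mask = torch.max(mask1, mask2)              # a position is valid if valid in either input
    mixed = repr1 * mix_lmbd.unsqueeze(1) + repr2 * (1 - mix_lmbd).unsqueeze(1)
    return mixed.shape, mask.shape              # torch.Size([2, 7, 4]), torch.Size([2, 7])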
|
# Copyright 2015, Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inspector supports traversal of the token tree hierarchy.
In particular, inspector comes handy when we work with tokens hierarchies that
contain name components unknown to the client. Inspector understands the
hierarchical names of workflow tokens and exposes an interface to traverse
those names level-by-level. E.g., we may use the inspector to find all
workflow instance ids or all waiting jobs in a given workflow instance.
"""
from pinball.master.thrift_lib.ttypes import GroupRequest
from pinball.workflow.name import Name
__author__ = 'Pawel Garbacki'
__copyright__ = 'Copyright 2015, Pinterest, Inc.'
__credits__ = [__author__]
__license__ = 'Apache'
__version__ = '2.0'
class Inspector(object):
def __init__(self, client):
self._client = client
def get_workflow_names(self):
"""Return list of workflow names."""
request = GroupRequest()
request.namePrefix = Name.WORKFLOW_PREFIX
request.groupSuffix = Name.DELIMITER
response = self._client.group(request)
workflow_names = []
if response.counts:
for prefix in response.counts.keys():
name = Name.from_workflow_prefix(prefix)
if name.workflow:
workflow_names.append(name.workflow)
return workflow_names
def get_workflow_instances(self, workflow_name):
"""Return list of instances of a given workflow."""
request = GroupRequest()
name = Name()
name.workflow = workflow_name
request.namePrefix = name.get_workflow_prefix()
request.groupSuffix = Name.DELIMITER
response = self._client.group(request)
instance_names = []
if response.counts:
for prefix in response.counts.keys():
name = Name.from_instance_prefix(prefix)
if name.instance:
instance_names.append(name.instance)
return instance_names
def _get_job_names(self, workflow_name, instance, state):
"""Return list of job names in a given workflow instance and state.
E.g., assume the following tokens are stored in the master:
/workflow/some_workflow/12345/waiting/some_waiting_job
/workflow/some_workflow/12345/waiting/some_other_waiting_job
/workflow/some_workflow/12345/runnable/some_runnable_job
the method called with workflow_name=some_workflow, instance=12345,
state=waiting will return [some_waiting_job, some_other_waiting_job].
"""
request = GroupRequest()
name = Name()
name.workflow = workflow_name
name.instance = instance
name.job_state = state
request.namePrefix = name.get_job_state_prefix()
request.groupSuffix = Name.DELIMITER
response = self._client.group(request)
job_names = []
if response.counts:
for job_name in response.counts.keys():
name = Name.from_job_token_name(job_name)
job_names.append(name.job)
return job_names
def get_runnable_job_names(self, workflow_name, instance):
"""Return names of runnable jobs in a given workflow instance."""
return self._get_job_names(workflow_name,
instance,
Name.RUNNABLE_STATE)
def get_waiting_job_names(self, workflow_name, instance):
"""Return names of waiting jobs in a given workflow instance."""
return self._get_job_names(workflow_name,
instance,
Name.WAITING_STATE)
def get_event_names(self, workflow_name, instance, job, input_name):
"""Return names of events under a workflow instance, job, and input."""
request = GroupRequest()
name = Name()
name.workflow = workflow_name
name.instance = instance
name.job = job
name.input = input_name
request.namePrefix = name.get_input_prefix()
request.groupSuffix = Name.DELIMITER
response = self._client.group(request)
events = []
if response.counts:
for event in response.counts.keys():
name = Name.from_event_token_name(event)
events.append(name.event)
return events
|
import collections
import dataclasses
import functools
import ipaddress
import socket
import xml.etree.ElementTree
import typing
import xmltodict
from palo_alto_firewall_analyzer.pan_config import PanConfig
# A registry is used to auto-register the policy validators and fixers.
policy_validator_registry = {}
def register_policy_validator(readable_name, description):
def inner_decorator(f):
if readable_name in policy_validator_registry:
raise KeyError(f"Name '{readable_name}' already in use!")
policy_validator_registry[readable_name] = (readable_name, description, f)
return f
return inner_decorator
def get_policy_validators():
return policy_validator_registry
policy_fixer_registry = {}
def register_policy_fixer(readable_name, description):
def inner_decorator(f):
if readable_name in policy_fixer_registry:
raise KeyError(f"Name '{readable_name}' already in use!")
policy_fixer_registry[readable_name] = (readable_name, description, f)
return f
return inner_decorator
def get_policy_fixers():
return policy_fixer_registry
@dataclasses.dataclass
class ProfilePackage:
"""Class for storing the values associated with a firewall configuration"""
panorama: str
api_key: str
pan_config: PanConfig
mandated_log_profile: str
allowed_group_profiles: typing.List[str]
default_group_profile: str
ignored_dns_prefixes: typing.List[str]
device_group_hierarchy_children: typing.Dict[str, typing.List]
device_group_hierarchy_parent: typing.Dict[str, str]
device_groups_and_firewalls: typing.Dict[str, typing.List[str]]
device_groups: typing.List[str]
devicegroup_objects: typing.Dict
devicegroup_exclusive_objects: typing.Dict
rule_limit_enabled: bool
verbose: bool
no_api: bool
BadEntry = collections.namedtuple('BadEntry', ['data', 'text', 'device_group', 'entry_type'])
@functools.lru_cache(maxsize=None)
def cached_dns_lookup(domain):
try:
return socket.gethostbyname(domain)
except socket.gaierror:
return None
def get_single_ip_from_address(address_entry):
"""
address_entry: Address object
Return: An ip address that is inside of the Address Object.
"""
if "ip-netmask" in address_entry:
return ipaddress.ip_network(address_entry['ip-netmask'], False)[0].exploded
elif 'ip-range' in address_entry:
return address_entry['ip-range'].split('-', 1)[0]
elif 'fqdn' in address_entry:
ip = cached_dns_lookup(address_entry['fqdn'])
if ip:
return ip
else:
# wildcard masks aren't supported yet
raise Exception(f"Unable to extract an ip from {address_entry}")
@functools.lru_cache(maxsize=None)
def xml_object_to_dict(xml_obj):
obj_xml_string = xml.etree.ElementTree.tostring(xml_obj)
obj_dict = xmltodict.parse(obj_xml_string)
return obj_dict
@functools.lru_cache(maxsize=None)
def get_single_ip_from_address(address_entry):
"""
address_entry: Address object
Return: An ip address that is inside of the Address Object.
"""
address_dict = xml_object_to_dict(address_entry)['entry']
if "ip-netmask" in address_dict:
return ipaddress.ip_network(address_dict['ip-netmask'], False)[0].exploded
elif 'ip-range' in address_dict:
return address_dict['ip-range'].split('-', 1)[0]
elif 'fqdn' in address_dict:
ip = cached_dns_lookup(address_dict['fqdn'])
if ip:
return ip
else:
# wildcard masks aren't supported yet
raise Exception(f"Unable to extract an ip from {address_entry}")
def _squash_devicegroup(device_group, device_group_hierarchy_children):
"""Recursive function for determining all of a device group's child device groups"""
result = [device_group]
if device_group in device_group_hierarchy_children:
for child_dg in device_group_hierarchy_children[device_group]:
result += _squash_devicegroup(child_dg, device_group_hierarchy_children)
return sorted(result)
def squash_all_devicegroups(device_groups_and_firewalls, device_group_hierarchy_children,
device_group_hierarchy_parent):
"""Squashes all device groups, so that a single device group can be mapped to all child Device Groups
This is useful for when seeing which device groups rules at a higher-level device group apply to"""
all_devicegroups = {}
for device_group in device_groups_and_firewalls.keys():
all_devicegroups[device_group] = _squash_devicegroup(device_group, device_group_hierarchy_children)
return all_devicegroups
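
# --- Illustrative sketch (added for illustration, not part of the original module) ---
# Shows how the decorator-based registries above are meant to be used: a validator
# registers itself under a readable name and can later be looked up and invoked.
# The validator name and its (empty) logic here are made up for demonstration.
@register_policy_validator("ExampleNoOpValidator", "Example validator that reports nothing")
def _example_noop_validator(profile_package):
    return []

def _validator_registry_demo():
    name, description, func = get_policy_validators()["ExampleNoOpValidator"]
    return name, description, func(None)   # ('ExampleNoOpValidator', 'Example validator that reports nothing', [])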
|
from loguru import logger
from app.arq.tasks.classes.abstract import AbstractSyncTask
from app.utils.minio import upload_screenshot
class UploadScrenshotTask(AbstractSyncTask):
def __init__(self, uuid: str, screenshot: bytes):
self.screenshot = screenshot
self.uuid = uuid
def _process(self) -> None:
upload_screenshot("uzen-screenshot", self.uuid, self.screenshot)
        logger.debug(f"Screenshot is uploaded as {self.uuid}.png")
@classmethod
def process(cls, uuid: str, screenshot: bytes) -> None:
instance = cls(uuid, screenshot)
return instance.safe_process()
|
import pygame
import time
import random
pygame.init()
white = (255,255,255)
black = (0,0,0)
red =(200,0,0)
light_red = (255,0,0)
yellow = (200,200,0)
light_yellow = (255,255,0)
green = (34,177,76)
light_green = (0,255,0)
display_width = 800
display_height = 600
clock = pygame.time.Clock()
gameDisplay = pygame.display.set_mode((display_width,display_height))
pygame.display.set_caption("3d")
smallfont = pygame.font.SysFont("comicsansms", 25)
medfont = pygame.font.SysFont("comicsansms", 50)
largefont = pygame.font.SysFont("comicsansms", 85)
FPS = 30
def gameLoop():
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
pass
if event.key == pygame.K_RIGHT:
pass
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
pass
if event.key == pygame.K_UP or event.key == pygame.K_DOWN:
pass
        gameDisplay.fill(black)
        pygame.display.update()
clock.tick(FPS)
gameLoop()
|
import argparse
import os
import sys
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
# args
parser = argparse.ArgumentParser()
parser.add_argument("-g", action="store_true", help="show graph")
parser.add_argument("filename", help="CSV file")
args = parser.parse_args()
# data
df = pd.read_csv(args.filename)
print(df)
print()
# separate the output column
y_name = df.columns[-1]
y_df = df[y_name]
X_df = df.drop(y_name, axis=1)
# one-hot encode categorical features
X_df = pd.get_dummies(X_df)
print(X_df)
print()
# numpy arrays
X_ar = np.array(X_df, dtype=np.float32)
y_ar = np.array(y_df, dtype=np.float32)
# scale values to 0-1 range
X_ar = preprocessing.MinMaxScaler().fit_transform(X_ar)
y_ar = y_ar.reshape(-1, 1)
y_ar = preprocessing.MinMaxScaler().fit_transform(y_ar)
y_ar = y_ar[:, 0]
# split into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(
X_ar, y_ar, random_state=0, test_size=0.25
)
print(f"training data: {X_train.shape} -> {y_train.shape}")
print(f"testing data: {X_test.shape} -> {y_test.shape}")
print()
# torch tensors
X_tensor = torch.from_numpy(X_train)
y_tensor = torch.from_numpy(y_train)
# hyperparameters
in_features = X_train.shape[1]
out_features = 1
epochs = 5000
# model
model = nn.Linear(in_features, out_features)
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.5)
# train
print("training")
for epoch in range(1, epochs + 1):
# forward
output = model(X_tensor)
    cost = criterion(output.squeeze(1), y_tensor)  # squeeze (N,1) output to match (N,) targets
# backward
optimizer.zero_grad()
cost.backward()
optimizer.step()
# print progress
if epoch % (epochs // 50) == 0:
print(f"{epoch:6d} {cost.item():10f}")
print()
# test
with torch.no_grad():
    X_tensor = torch.from_numpy(X_test)
    predicted = model(X_tensor).detach().numpy().ravel()
errors = abs(predicted - y_test)
print("mean squared error:", np.mean(np.square(errors)))
print("mean absolute error:", np.mean(errors))
# graph
if args.g:
    plt.plot(X_test, y_test, label=args.filename)
    plt.plot(X_test, predicted, label="predicted")
plt.legend()
plt.savefig(os.path.splitext(os.path.basename(sys.argv[0]))[0] + ".png")
plt.show(block=False)
plt.pause(10)
|
# Generated by Django 2.0 on 2017-12-10 17:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0005_auto_20171210_1927'),
]
operations = [
migrations.CreateModel(
name='RiderReview',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('review', models.TextField()),
('driver', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.Driver')),
('rider_profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.RiderProfile')),
],
),
]
|
"""
Platform support interface for fab_support
This will redirect various tasks to the correct platform to support them
"""
from fabric.api import env
#
import fab_support.heroku as heroku
from fab_support import FabricSupportException
def env_to_platform(stage):
"""
Given a stage will return the
:param stage: Which stage is being used eg prod uat
:return:
"""
platform = env["stages"][stage]["FS_PLATFORM"]
if platform in ("heroku",): # limit for security
return platform
else:
raise FabricSupportException(f"Unknown platform: {platform}")
def env_to_function(stage, my_function_name):
"""
    Given the name of a stage and a function, return that function from the stage's platform module
    :param stage: Which stage is being used eg prod uat
    :param my_function_name: name of the function to look up on the platform module
    :return:
"""
platform = env_to_platform(stage)
the_module = globals()[platform] # This is an indirect reference to the module
func = getattr(the_module, my_function_name)
return func
def fab_support_function(stage, function_name, **kwargs):
func = env_to_function(stage, function_name)
func(stage, **kwargs)
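
# --- Illustrative note (added; key names other than FS_PLATFORM are assumptions) ---
# env_to_platform() expects Fabric's env to carry a "stages" mapping shaped roughly like:
#
#   env["stages"] = {
#       "prod": {"FS_PLATFORM": "heroku"},   # plus whatever keys the heroku module needs
#       "uat":  {"FS_PLATFORM": "heroku"},
#   }
#
# With that in place, fab_support_function("prod", "deploy") resolves heroku.deploy via
# env_to_function() and calls it as deploy("prod"), assuming the heroku module exposes
# such a task.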
|
#!/usr/bin/env python
import time
from hexdump import hexdump
from panda import Panda
from bitstring import BitArray, BitStream
p = Panda()
# this is a test, no safety
p.set_safety_mode(Panda.SAFETY_ALLOUTPUT)
# get version
print(p.get_version())
# **** test K/L line loopback ****
send_bus = 3
receive_bus = 2
p.set_uart_baud(send_bus, 10400)
p.set_uart_baud(receive_bus, 10400)
p.set_uart_parity(send_bus, 0) # parity, 0=off, 1=even, 2=odd
p.set_uart_parity(receive_bus, 0)
while 1:
#LIN send
#st ="test"
#st = b"\x60"+chr(len(st)+3).encode()+st
#lin_break = "\x00\x00"
#lin_sync = "\x55"
#dat = "\x3c\x60\x06\xB1\x11\x00\x00\x00\x04\xd2"
#dat = "\x60\x06\xB1\x11\x00\x00\x00\x04"
#dat = "\x55\x3c\x60\x06\xB1\x11\x00\x00\x00\x04\xD2"
#dat2 = "\x55\x7D"
#st = lin_break + lin_sync + dat
#st = lin_sync + dat
#st = dat
#st2 = dat2
#st = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0E\x0F"
#st = "\x55"
#st = BitArray('0b10000000')
#st = b"\xaa"+chr(len(st)+3).encode()+st
print("Drain anything in the buffer:")
ret = p.kline_drain(bus=send_bus)
hexdump(ret)
ret = p.kline_drain(bus=send_bus)
hexdump(ret)
ret = p.kline_drain(bus=send_bus)
hexdump(ret)
print("Sending Assign frameID, default NAD used and ID(PxReq) = 04,ID(PxResp) = 05: SB SF 3C 60 06 B1 11 00 00 00 04 D2")
p.kline_send("\x55\x3c\x60\x06\xB1\x11\x00\x00\x00\x04\xD2", bus=send_bus, checksum=False)
print("Sending Empty header for slave to respond with positive response: SB SF 7D")
p.kline_send("\x55\x7D", bus=send_bus, checksum=False)
time.sleep(0.2)
#LIN RECV
ret = p.kline_drain(bus=send_bus)
print("RET Data positive response:")
hexdump(ret)
print("Sending Datadump1, 8 x LSE: SB SF 3C 60 06 B4 00 00 FF 00 00 E4")
#print("Sending Datadump1, 8 x High Side Emable: SB SF 3C 60 06 B4 00 FF 00 00 00 E4")
p.kline_send("\x55\x3c\x60\x06\xb4\x00\x00\xff\x00\x00\xe4", bus=send_bus, checksum=False)
#p.kline_send("\x55\x3c\x60\x06\xb4\x00\xff\x00\x00\x00\xe4", bus=send_bus, checksum=False)
print("Sending Empty header for slave to respond with config: SB SF 7D")
p.kline_send("\x55\x7D", bus=send_bus, checksum=False)
time.sleep(0.2)
#LIN RECV
ret = p.kline_drain(bus=send_bus)
print("RET Data config:")
hexdump(ret)
print("Sending Datadump2, no capture and threshold select (optional) SB SF 3C 60 06 B4 40 00 00 00 00 A4: ")
p.kline_send("\x55\x3c\x60\x06\xb4\x40\x00\x00\x00\x00\xa4", bus=send_bus, checksum=False)
print("Sending Empty header for slave to respond with config 2: SB SF 7D")
p.kline_send("\x55\x7D", bus=send_bus, checksum=False)
time.sleep(0.2)
#LIN RECV
ret = p.kline_drain(bus=send_bus)
print("RET Data config 2:")
hexdump(ret)
print("Sending Datadump3, LHvalue=0x55, default PWM = 0x10 (optional) SB SF 3C 60 04 B4 80 55 10 FF FF 01:")
p.kline_send("\x55\x3c\x60\x04\xb4\x80\x55\x10\xff\xff\x01", bus=send_bus, checksum=False)
print("Sending Empty header for slave to respond with config 3: SB SF 7D")
p.kline_send("\x55\x7D", bus=send_bus, checksum=False)
time.sleep(0.2)
#LIN RECV
ret = p.kline_drain(bus=send_bus)
print("RET Data config 3:")
hexdump(ret)
# print("Sending Diagnostic Data Block 4: SB SF 3C 60 02 B4 C0 FF FF FF FF 28:")
# p.kline_send("\x55\x3c\x60\x02\xb4\xc0\xff\xff\xff\xff\x28", bus=send_bus, checksum=False)
#
# print("Sending Empty header for slave to respond with diag 4: SB SF 7D")
# p.kline_send("\x55\x7D", bus=send_bus, checksum=False)
#
# time.sleep(0.2)
#
# #LIN RECV
# ret = p.kline_drain(bus=send_bus)
# print("RET Data diag 4:")
# hexdump(ret)
print("Sending Read by identifier request (optional) SB SF 3C 60 06 B2 00 11 00 00 00 D5:")
p.kline_send("\x55\x3c\x60\x06\xb2\x00\x11\x00\x00\x00\xd5", bus=send_bus, checksum=False)
    print("Sending Empty header for slave to respond with id request positive resp: SB SF 7D")
p.kline_send("\x55\x7D", bus=send_bus, checksum=False)
time.sleep(0.2)
#LIN RECV
ret = p.kline_drain(bus=send_bus)
print("RET Data id request positive response:")
hexdump(ret)
print("P0 on: SB SF C4 01 80 7E")
p.kline_send("\x55\xc4\x01\x80\x7e", bus=send_bus, checksum=False)
time.sleep(0.2)
print("Any return?:")
hexdump(ret)
print("P1 on: SB SF C4 02 80 7D")
p.kline_send("\x55\xc4\x02\x80\x7d", bus=send_bus, checksum=False)
time.sleep(0.2)
print("Any return?:")
hexdump(ret)
print("P2 on: SB SF C4 04 80 7B")
p.kline_send("\x55\xc4\x04\x80\x7b", bus=send_bus, checksum=False)
time.sleep(0.2)
print("Any return?:")
hexdump(ret)
print("P3 on: SB SF C4 08 80 77")
p.kline_send("\x55\xc4\x08\x80\x77", bus=send_bus, checksum=False)
time.sleep(0.2)
print("Any return?:")
hexdump(ret)
print("P4 on: SB SF C4 10 80 6F")
p.kline_send("\x55\xc4\x10\x80\x6f", bus=send_bus, checksum=False)
time.sleep(0.2)
print("Any return?:")
hexdump(ret)
print("P5 on: SB SF C4 20 80 5F")
p.kline_send("\x55\xc4\x20\x80\x5f", bus=send_bus, checksum=False)
time.sleep(0.2)
print("Any return?:")
hexdump(ret)
print("P6 on: SB SF C4 40 80 3F")
p.kline_send("\x55\xc4\x40\x80\x3f", bus=send_bus, checksum=False)
time.sleep(0.2)
print("Any return?:")
hexdump(ret)
print("P7 on: SB SF C4 80 80 FE")
p.kline_send("\x55\xc4\x80\x80\xfe", bus=send_bus, checksum=False)
time.sleep(0.2)
print("Any return?:")
hexdump(ret)
exit(0)
# this sets bus 2 to actually be GMLAN
# p2.set_gmlan(bus=2)
#
# send w bitbang then without
# iden = 123
# iden = 18000
# dat = "\x01\x02"
# dat = "\x01\x02\x03\x04\x05\x06\x07\x08"
# while 1:
# iden += 1
# p1.set_gmlan(bus=None)
# p1.can_send(iden, dat, bus=3)
# p1.set_gmlan(bus=2)
# p1.can_send(iden, dat, bus=3)
# time.sleep(0.01)
# print p2.can_recv()
# exit(0)
|
from pytest import mark
from cats.v2 import ByteCodec
class TestBytesCodec:
@mark.parametrize('inp, res', (
(b'Hello', b'Hello'),
(bytearray([10]), b'\x0A'),
(memoryview(b'Hello'), b'Hello')
))
@mark.asyncio
async def test_encode_success(self, inp, res):
assert await ByteCodec.encode(inp, {}) == res
@mark.asyncio
async def test_decode_success(self):
assert await ByteCodec.decode(b'Hello', {}) == b'Hello'
|
#!/usr/bin/env python3
import os
import sys
import connexion
sys.path.append(os.path.join(os.path.dirname(__file__)))
from cerise.config import make_config
from cerise.front_end.encoder import JSONEncoder
app = connexion.App(__name__, specification_dir='front_end/swagger/')
app.app.json_encoder = JSONEncoder
app.add_api('swagger.yaml', base_path='/', arguments={'title': 'Cerise'})
application = app.app
if __name__ == '__main__':
config = make_config()
app.run(host=config.get_service_host(), port=config.get_service_port())
|
from api import app
from api.views import login
from api.views import hits
from api.views import authentication
app.run(host='0.0.0.0', port=8080)
|
# Copyright (C) tkornuta, IBM Corporation 2019
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "Tomasz Kornuta"
import torch
from ptp.components.component import Component
from ptp.components.mixins.word_mappings import WordMappings
from ptp.data_types.data_definition import DataDefinition
class LabelIndexer(Component, WordMappings):
"""
    Class responsible for changing samples consisting of single words/labels into indices (that can e.g. later be used for loss calculation, PyTorch-style).
"""
def __init__(self, name, config):
"""
Initializes the component.
:param name: Component name (read from configuration file).
:type name: str
:param config: Dictionary of parameters (read from the configuration ``.yaml`` file).
:type config: :py:class:`ptp.configuration.ConfigInterface`
"""
# Call constructor(s) of parent class(es) - in the right order!
Component.__init__(self, name, LabelIndexer, config)
WordMappings.__init__(self)
# Set key mappings.
self.key_inputs = self.stream_keys["inputs"]
self.key_outputs = self.stream_keys["outputs"]
# Get value from configuration.
self.out_of_vocabulary_value = self.config["out_of_vocabulary_value"]
def input_data_definitions(self):
"""
Function returns a dictionary with definitions of input data that are required by the component.
:return: dictionary containing input data definitions (each of type :py:class:`ptp.utils.DataDefinition`).
"""
return {
self.key_inputs: DataDefinition([-1, 1], [list, str], "Batch of labels (words), each represented as a single string [BATCH_SIZE] x [string]"),
}
def output_data_definitions(self):
"""
        Function returns a dictionary with definitions of output data produced by the component.
:return: dictionary containing output data definitions (each of type :py:class:`ptp.utils.DataDefinition`).
"""
return {
self.key_outputs: DataDefinition([-1], [torch.Tensor], "Batch of labels, each represented as a single index [BATCH_SIZE]")
}
def __call__(self, data_dict):
"""
Encodes "inputs" in the format of a single word.
Stores result in "outputs" field of in data_dict.
:param data_dict: :py:class:`ptp.utils.DataDict` object containing (among others):
            - "inputs": expected input field containing list of words [BATCH_SIZE] x [string]
- "outputs": added output field containing list of indices [BATCH_SIZE]
"""
# Get inputs to be encoded.
inputs = data_dict[self.key_inputs]
outputs_list = []
# Process samples 1 by 1.
for sample in inputs:
assert not isinstance(sample, (list,)), 'This encoder requires input sample to contain a single word'
# Process single token.
if sample in self.word_to_ix.keys():
output_sample = self.word_to_ix[sample]
else:
# Word out of vocabulary.
output_sample = self.out_of_vocabulary_value
outputs_list.append(output_sample)
# Transform to tensor.
output_tensor = torch.tensor(outputs_list)
# Create the returned dict.
data_dict.extend({self.key_outputs: output_tensor})
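
# --- Standalone sketch (added for illustration, not part of the component) ---
# Mirrors what __call__ above does: map each single-word label to its index, fall back
# to the out-of-vocabulary value for unknown words, and pack the result into a tensor.
def _label_indexing_demo():
    word_to_ix = {"yes": 0, "no": 1}   # stand-in for the WordMappings vocabulary
    out_of_vocabulary_value = -1       # stand-in for config["out_of_vocabulary_value"]
    labels = ["yes", "maybe", "no"]
    indices = [word_to_ix.get(word, out_of_vocabulary_value) for word in labels]
    return torch.tensor(indices)       # tensor([ 0, -1,  1])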
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Assets/Change_Account_PIN.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Change_Account_PIN(object):
def setupUi(self, Change_Account_PIN):
Change_Account_PIN.setObjectName("Change_Account_PIN")
Change_Account_PIN.resize(1270, 590)
Change_Account_PIN.setMinimumSize(QtCore.QSize(1270, 590))
Change_Account_PIN.setMaximumSize(QtCore.QSize(1270, 590))
Change_Account_PIN.setStyleSheet("#Change_Account_PIN{\n"
"background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0.02, y2:0.0340909, stop:0 rgba(255, 255, 255, 255), stop:0.495 rgba(0, 0, 0, 199), stop:0.505 rgba(28, 28, 28, 255), stop:1 rgba(53, 53, 53, 255));\n"
"}\n"
"#topframe{\n"
"background-color: qconicalgradient(cx:0.000472637, cy:0, angle:227.8, stop:0 rgba(30, 0, 255, 255), stop:0.375 rgba(0, 195, 255, 252), stop:0.423533 rgba(37, 0, 255, 255), stop:0.45 rgba(0, 130, 255, 255), stop:0.477581 rgba(0, 56, 255, 252), stop:0.518717 rgba(71, 171, 255, 255), stop:0.542289 rgba(71, 171, 255, 255), stop:0.547264 rgba(68, 134, 249, 255), stop:0.55 rgba(0, 74, 255, 255), stop:0.552239 rgba(21, 93, 243, 255), stop:0.57754 rgba(102, 0, 255, 255), stop:0.625 rgba(0, 185, 255, 247), stop:1 rgba(255, 255, 0, 69));\n"
"border:none;\n"
"}\n"
"#bottomframe{\n"
"background-color: qlineargradient(spread:pad, x1:0.025, y1:0.892545, x2:0.0199005, y2:0, stop:0 rgba(244, 244, 244, 255), stop:0.495 rgba(218, 212, 186, 230), stop:0.505 rgba(236, 224, 224, 255), stop:1 rgba(253, 246, 246, 255));\n"
"}\n"
"QPushButton{\n"
"border-radius:15px;\n"
"background:#aaff00;\n"
"border:2px solid #aaff00;\n"
"font: 75 12pt \"MS Sans Serif\";\n"
"}\n"
"#frame{\n"
"background:#d4d4d4;\n"
"}\n"
"Line{\n"
"background:rgb(6, 6, 6)\n"
"}\n"
"#label_12{\n"
"background:#000000;\n"
"color:white;\n"
"}\n"
"QGroupBox{\n"
"background:white;\n"
"}")
self.bottomframe = QtWidgets.QFrame(Change_Account_PIN)
self.bottomframe.setGeometry(QtCore.QRect(10, 520, 1251, 61))
self.bottomframe.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.bottomframe.setFrameShadow(QtWidgets.QFrame.Raised)
self.bottomframe.setObjectName("bottomframe")
self.label = QtWidgets.QLabel(self.bottomframe)
self.label.setGeometry(QtCore.QRect(490, 10, 171, 16))
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(self.bottomframe)
self.label_2.setGeometry(QtCore.QRect(410, 30, 381, 16))
self.label_2.setObjectName("label_2")
self.sideframe = QtWidgets.QFrame(Change_Account_PIN)
self.sideframe.setGeometry(QtCore.QRect(9, 119, 181, 401))
self.sideframe.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.sideframe.setFrameShadow(QtWidgets.QFrame.Raised)
self.sideframe.setObjectName("sideframe")
self.Admin_Home_btn_2 = QtWidgets.QPushButton(self.sideframe)
self.Admin_Home_btn_2.setGeometry(QtCore.QRect(8, 189, 171, 31))
self.Admin_Home_btn_2.setObjectName("Admin_Home_btn_2")
self.User_Details_btn_2 = QtWidgets.QPushButton(self.sideframe)
self.User_Details_btn_2.setGeometry(QtCore.QRect(7, 230, 171, 31))
self.User_Details_btn_2.setObjectName("User_Details_btn_2")
self.Log_out_btn_2 = QtWidgets.QPushButton(self.sideframe)
self.Log_out_btn_2.setGeometry(QtCore.QRect(8, 270, 161, 31))
self.Log_out_btn_2.setObjectName("Log_out_btn_2")
self.label_12 = QtWidgets.QLabel(self.sideframe)
self.label_12.setGeometry(QtCore.QRect(10, 165, 171, 21))
self.label_12.setObjectName("label_12")
self.User_Picture = QtWidgets.QLabel(self.sideframe)
self.User_Picture.setGeometry(QtCore.QRect(10, 10, 161, 141))
self.User_Picture.setText("")
self.User_Picture.setObjectName("User_Picture")
self.topframe = QtWidgets.QPushButton(Change_Account_PIN)
self.topframe.setGeometry(QtCore.QRect(10, 10, 1251, 101))
self.topframe.setText("")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("Assets\\Images/Bank-Management-System.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.topframe.setIcon(icon)
self.topframe.setIconSize(QtCore.QSize(1300, 520))
self.topframe.setObjectName("topframe")
self.groupBox = QtWidgets.QGroupBox(Change_Account_PIN)
self.groupBox.setGeometry(QtCore.QRect(440, 250, 501, 171))
self.groupBox.setAlignment(QtCore.Qt.AlignCenter)
self.groupBox.setObjectName("groupBox")
self.label_3 = QtWidgets.QLabel(self.groupBox)
self.label_3.setGeometry(QtCore.QRect(10, 50, 111, 20))
self.label_3.setObjectName("label_3")
self.New_Account_Pin = QtWidgets.QLineEdit(self.groupBox)
self.New_Account_Pin.setGeometry(QtCore.QRect(130, 50, 231, 22))
self.New_Account_Pin.setObjectName("New_Account_Pin")
self.label_4 = QtWidgets.QLabel(self.groupBox)
self.label_4.setGeometry(QtCore.QRect(10, 90, 131, 20))
self.label_4.setObjectName("label_4")
self.Confirm_Account_Pin = QtWidgets.QLineEdit(self.groupBox)
self.Confirm_Account_Pin.setGeometry(QtCore.QRect(140, 90, 231, 22))
self.Confirm_Account_Pin.setObjectName("Confirm_Account_Pin")
self.pushButton = QtWidgets.QPushButton(self.groupBox)
self.pushButton.setGeometry(QtCore.QRect(320, 127, 161, 31))
self.pushButton.setObjectName("pushButton")
self.textEdit = QtWidgets.QTextEdit(Change_Account_PIN)
self.textEdit.setGeometry(QtCore.QRect(350, 130, 701, 101))
self.textEdit.setObjectName("textEdit")
self.retranslateUi(Change_Account_PIN)
QtCore.QMetaObject.connectSlotsByName(Change_Account_PIN)
def retranslateUi(self, Change_Account_PIN):
_translate = QtCore.QCoreApplication.translate
Change_Account_PIN.setWindowTitle(_translate("Change_Account_PIN", "Bank Management System"))
self.label.setText(_translate("Change_Account_PIN", "Project by: Ankit Choudhary "))
self.label_2.setText(_translate("Change_Account_PIN", "ID: 19CS014 Anand International College of Engineering, Jaipur"))
self.Admin_Home_btn_2.setText(_translate("Change_Account_PIN", "Change Password"))
self.User_Details_btn_2.setText(_translate("Change_Account_PIN", "Change PIN No."))
self.Log_out_btn_2.setText(_translate("Change_Account_PIN", "Sign/Log out"))
self.label_12.setText(_translate("Change_Account_PIN", "Security Settings"))
self.groupBox.setTitle(_translate("Change_Account_PIN", " :: Change Account PIN :: "))
self.label_3.setText(_translate("Change_Account_PIN", "New Account PIN : "))
self.label_4.setText(_translate("Change_Account_PIN", "Confirm Account PIN :"))
self.pushButton.setText(_translate("Change_Account_PIN", "Change PIN"))
        self.textEdit.setHtml(_translate("Change_Account_PIN", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:7.8pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:12pt; font-weight:600;\">Change Account PIN</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">If you feel that you have a weak password, please change it. We recommend changing your password every 45 days to keep it secure.</p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-weight:600;\">Account PIN change guideline.</span></p></body></html>"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Change_Account_PIN = QtWidgets.QWidget()
ui = Ui_Change_Account_PIN()
ui.setupUi(Change_Account_PIN)
Change_Account_PIN.show()
sys.exit(app.exec_())
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 12 18:47:50 2017
@author: adelpret
"""
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
from dynamic_graph.sot.torque_control.hrp2.control_manager_conf import IN_OUT_GAIN
from scipy import ndimage
from identification_utils import solve1stOrderLeastSquare
def identify_motor_static(enc, dq, ctrl, current, tau, JOINT_ID, JOINT_NAME, ZERO_VELOCITY_THRESHOLD,
ZERO_VELOCITY_THRESHOLD_SMALL, SHOW_THRESHOLD_EFFECT):
# remove high velocity
maskConstAng = (abs(dq) < ZERO_VELOCITY_THRESHOLD)
# erode to get only steady phases where velocity is small
maskConstAng = ndimage.morphology.binary_erosion(maskConstAng, None, 100)
maskPosVel = (dq > ZERO_VELOCITY_THRESHOLD_SMALL)
maskNegVel = (dq < -ZERO_VELOCITY_THRESHOLD_SMALL)
maskConstPosAng = np.logical_and(maskConstAng, maskPosVel)
maskConstNegAng = np.logical_and(maskConstAng, maskNegVel)
if SHOW_THRESHOLD_EFFECT:
plt.figure()
plt.plot(enc, label='q')
q_const = enc.copy()
q_const[np.logical_not(maskConstAng)] = np.nan
plt.plot(q_const, label='q_const')
plt.legend()
# identify current sensor gain
x = current[maskConstAng]
y = ctrl[maskConstAng] / IN_OUT_GAIN
maskPosErr = np.logical_and(y - x > 0.0, np.abs(x) > 0.5)
maskNegErr = np.logical_and(y - x < 0.0, np.abs(x) > 0.5)
print("Number of samples with constant angle:", x.shape[0])
print("Number of samples with constant angle and pos vel:", x[maskPosErr].shape[0])
print("Number of samples with constant angle and neg vel:", x[maskNegErr].shape[0])
if (x[maskPosErr].shape[0] < 10):
(Ks, DZ) = solve1stOrderLeastSquare(x[maskNegErr], y[maskNegErr])
elif (x[maskNegErr].shape[0] < 10):
(Ks, DZ) = solve1stOrderLeastSquare(x[maskPosErr], y[maskPosErr])
else:
(Ksn, DZn) = solve1stOrderLeastSquare(x[maskNegErr], y[maskNegErr])
(Ksp, DZp) = solve1stOrderLeastSquare(x[maskPosErr], y[maskPosErr])
Ks = 0.5 * (Ksp + Ksn)
Ks = min([Ksp, Ksn])
DZ = 0.5 * (DZp - DZn)
print("Current sensor gains = ", Ksp, Ksn)
print("Deadzones = ", DZp, -DZn)
x_neg = x[maskNegErr]
y_neg = y[maskNegErr]
plt.figure()
plt.plot(x_neg, y_neg, '.', lw=3, markersize=1, c='0.5')
plt.plot([min(x_neg), max(x_neg)], [Ksn * min(x_neg) + DZn, Ksn * max(x_neg) + DZn], 'g:', lw=3)
plt.ylabel(r'$i(t)$')
plt.xlabel(r'$u(t)$')
plt.title('Negative current errors - Joint ' + JOINT_NAME)
x_pos = x[maskPosErr]
y_pos = y[maskPosErr]
plt.figure()
plt.plot(x_pos, y_pos, '.', lw=3, markersize=1, c='0.5')
plt.plot([min(x_pos), max(x_pos)], [Ksp * min(x_pos) + DZp, Ksp * max(x_pos) + DZp], 'g:', lw=3)
plt.ylabel(r'$i(t)$')
plt.xlabel(r'$u(t)$')
plt.title('Positive current errors - Joint ' + JOINT_NAME)
plt.show()
if (Ks < 0.0):
print("ERROR: estimated Ks is negative! Setting it to 1")
Ks = 1.0
# plot dead zone effect ********************************************
plt.figure()
plt.plot(Ks * current, label='current')
plt.plot(ctrl / IN_OUT_GAIN, label='control')
plt.legend()
plt.figure()
y = Ks * current[maskConstAng]
x = ctrl[maskConstAng] / IN_OUT_GAIN - Ks * current[maskConstAng]
plt.ylabel(r'$i(t)$')
plt.xlabel(r'$ctrl(t)-i(t)$')
plt.plot(x, y, '.', lw=3, markersize=1, c='0.5')
plt.plot(x[maskPosErr], y[maskPosErr], 'rx', lw=3, markersize=1, label='pos err')
plt.plot(x[maskNegErr], y[maskNegErr], 'bx', lw=3, markersize=1, label='neg err')
plt.legend()
plt.figure()
y = ctrl[maskConstAng] / IN_OUT_GAIN
x = ctrl[maskConstAng] / IN_OUT_GAIN - Ks * current[maskConstAng]
plt.ylabel(r'$ctrl(t)$')
plt.xlabel(r'$ctrl(t)-i(t)$')
plt.plot(x, y, '.', lw=3, markersize=1, c='0.5')
plt.plot(x[maskPosErr], y[maskPosErr], 'rx', lw=3, markersize=1, label='pos err')
plt.plot(x[maskNegErr], y[maskNegErr], 'bx', lw=3, markersize=1, label='neg err')
plt.legend()
plt.figure()
y = ctrl / IN_OUT_GAIN
x = Ks * current
plt.ylabel(r'$ctrl(t)$')
plt.xlabel(r'$i(t)$')
plt.plot(x, y, '.', lw=3, markersize=1, c='0.5')
plt.plot([-3, 3], [-3, 3])
plt.show()
# y = a. x + b
# i = Kt.tau + Kf
# Identification ***************************************************
y = current # *Ks
x = tau
(Ktp, Kfp) = solve1stOrderLeastSquare(x[maskConstPosAng], y[maskConstPosAng])
(Ktn, b) = solve1stOrderLeastSquare(x[maskConstNegAng], y[maskConstNegAng])
Kfn = -b
# Plot *************************************************************
plt.figure()
plt.axhline(0, color='black', lw=1)
plt.axvline(0, color='black', lw=1)
plt.plot(x, y, '.', lw=3, markersize=1, c='0.5')
plt.plot(x[maskConstPosAng], y[maskConstPosAng], 'rx', lw=3, markersize=1)
plt.plot(x[maskConstNegAng], y[maskConstNegAng], 'bx', lw=3, markersize=1)
# plot identified lin model
plt.plot([min(x), max(x)], [Ktp * min(x) + Kfp, Ktp * max(x) + Kfp], 'g:', lw=3)
plt.plot([min(x), max(x)], [Ktn * min(x) - Kfn, Ktn * max(x) - Kfn], 'g:', lw=3)
plt.ylabel(r'$i(t)$')
plt.xlabel(r'$\tau(t)$')
plt.title('Static experiment - Joint ' + JOINT_NAME)
print("cur_sens_gain[%d] = %f" % (JOINT_ID, Ks))
print('deadzone[%d] = %f' % (JOINT_ID, DZ))
print('Kt_p[%d] = %f' % (JOINT_ID, Ktp))
print('Kt_n[%d] = %f' % (JOINT_ID, Ktn))
print('Kf_p[%d] = %f' % (JOINT_ID, Kfp))
print('Kf_n[%d] = %f' % (JOINT_ID, Kfn))
print('Kt_m[%d] = %f' % (JOINT_ID, (Ktp + Ktn) / 2.0))
print('Kf_m[%d] = %f' % (JOINT_ID, (Kfp + Kfn) / 2.0))
return (Ktp, Ktn, Ks, DZ)
|
# Read the range [A, B] and find the largest a such that two consecutive
# multiples of a (x and x + a) both fit inside the range.
A, B = map(int, input().split())
result = 0  # guard: stays 0 if no such a exists
for a in range(1, B + 1):
    x = (A + (a - 1)) // a * a  # smallest multiple of a that is >= A
    y = x + a                   # the next multiple of a
    if y > B:
        continue
    result = a                  # a works; larger values of a overwrite it
print(result)
|
# -*- coding: utf-8 -*-
"""exercicio-02.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1qI-SbkB65f7XZPwYaeSAdg7h1t7s-ah8
"""
5 / 2
#2.5
7 * 4 + 2
#30
(7 * 4) + 2
#30
7 * (4 + 2)
#42
2 ** 3
#8
2 ** 3 ** 4
#2417851639229258349412352
2 ** -3 ** 4
#4.1359030627651384e-25
5 % 2
#1
6 % 2
#0
7 % 2
#1
8 % 2
#0
5 + 1
#6
5.0 + 1
#6.0
import math

5 * math.log10(100) - 8 ** 2
#-54.0
math.pi
#3.141592653589793
math.sin(math.pi / 2 )
#1.0
math.cos(math.pi / 4 )
#0.7071067811865476
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-10-01 21:11
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wells', '0024_auto_20180925_2022'),
]
operations = [
migrations.AddField(
model_name='decommissiondescription',
name='activity_submission',
field=models.ForeignKey(blank=True, db_column='filing_number', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='decommission_description_set', to='wells.ActivitySubmission'),
),
migrations.AddField(
model_name='decommissiondescription',
name='well',
field=models.ForeignKey(blank=True, db_column='well_tag_number', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='decommission_description_set', to='wells.Well'),
),
]
|
import argparse
import json
from Bio import Phylo, SeqIO
from Bio.Align import MultipleSeqAlignment
from treetime import TreeAnc
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Add translations",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--tree', type=str, required=True, help="input tree")
parser.add_argument('--translations', type=str, nargs='+', required=True, help="amino acid alignment")
    parser.add_argument('--genes', type=str, nargs='+', required=True, help="gene names corresponding to the translation alignments")
parser.add_argument('--output', type=str, metavar="JSON", required=True, help="output Auspice JSON")
args = parser.parse_args()
genes = args.genes if type(args.genes)==list else [args.genes]
translations = args.translations if type(args.translations)==list else [args.translations]
T = Phylo.read(args.tree, 'newick')
leafs = {n.name for n in T.get_terminals()}
node_data = {}
for gene, translation in zip(genes, translations):
seqs = []
for s in SeqIO.parse(translation, 'fasta'):
if s.id in leafs:
seqs.append(s)
tt = TreeAnc(tree=T, aln=MultipleSeqAlignment(seqs), alphabet='aa')
tt.infer_ancestral_sequences(reconstruct_tip_states=True)
with open(translation.replace('.fasta', '_withInternalNodes.fasta'), 'w') as fh:
for n in tt.tree.find_clades():
if n.name not in node_data:
node_data[n.name] = {"aa_muts":{}}
node_data[n.name]["aa_muts"][gene] = [f"{a}{p+1}{d}" for a,p,d in n.mutations]
fh.write(f">{n.name}\n{tt.sequence(n, as_string=True, reconstructed=True)}\n")
with open(args.output, 'w') as fh:
json.dump({"nodes":node_data}, fh)
|
from sklearn.metrics import f1_score
import torch
from dataset import Dataset
from sklearn.preprocessing import label_binarize
import sklearn.metrics as metrics
import matplotlib.pyplot as plt
import os
from torchvision import transforms
import numpy as np
from PIL import Image, ImageFilter
from torch.autograd import Variable
model = torch.load('../BEST_checkpoint_resnet50.pth.tar')['model']
device = torch.device('cuda:2')
model.to(device)
model.eval()
data_transforms = transforms.Compose([
transforms.Lambda(lambda image: image.convert('RGB')),
transforms.Lambda(lambda image: image.filter(ImageFilter.EDGE_ENHANCE_MORE)),
transforms.Resize((224, 224)),
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(10),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
class_to_idx = {'T0':0, 'T1':1, 'T2':2, 'T2b': 2, 'T3': 3, 'T3a': 3, 'T3b' : 3, 'T4':4}
n_classes = 5
folder = '/home/tianshu/bladder-cancer/dataset/bbox_images'
test_data = '/home/tianshu/bladder-cancer/dataset/test.txt'
img_pths = []
labels = []
with open(test_data, 'r') as infile:
lines = infile.readlines()
for line in lines:
words = (line.strip('\n')).split(' ')
fn = words[0]
label = words[2]
img_pth = folder+'/'+fn+'_bbox.jpg'
img_pths.append(img_pth)
labels.append(label)
infile.close()
y_scores = []
y_trues = []
for i, img_pth in enumerate(img_pths, 0):
img = Image.open(img_pth)
img = data_transforms(img)
data = img.unsqueeze_(0)
data = Variable(data)
data = data.to(device)
y_true = labels[i]
'''
ground_truth = class_to_idx[y_true]
#for recall and F1
prob = torch.exp(model.forward(data))
_, y_pred = prob.topk(1)
y_pred = y_pred.cpu().detach().numpy()[0][0]
'''
if((y_true=='T2') or (y_true=='T2b')):
ground_truth = 1
else:
ground_truth = 0
prob = torch.exp(model.forward(data))
#top_probs, top_labs = prob.topk(5)
_, top1 = prob.topk(1)
top1 = top1.cpu().detach().numpy()[0][0]
if(top1 == 2):
y_pred = 1
else:
y_pred = 0
y_trues.append(ground_truth)
y_scores.append(y_pred)
y_trues = np.array(y_trues)
y_scores = np.array(y_scores)
'''
from sklearn.metrics import classification_report
target_names = ['T0', 'T1', 'T2', 'T3', 'T4']
print(classification_report(y_trues, y_scores, target_names=target_names))
'''
fpr, tpr, thresholds = metrics.roc_curve(y_trues.ravel(), y_scores.ravel())
auc_score = metrics.roc_auc_score(y_trues.ravel(), y_scores.ravel())
auc = metrics.auc(fpr, tpr)
print(auc)
print(auc_score)
with open('ROCs.txt', 'a') as out:
out.write(str(fpr) + ' ' + str(tpr) + ' ' + str(auc) + ' T2')
out.close()
|
import json
from discord.ext.commands import Cog, Bot, Context
from utils.utils import log_event, db, get_dict
class Extension(Cog):
def __init__(self, _bot: Bot):
self.bot = _bot
@Cog.listener()
async def on_ready(self):
log_event(f'{self.qualified_name} extension loaded')
class DatabaseHandler(Extension):
def __init__(self, _bot: Bot, db_key: str):
super().__init__(_bot)
self.DB_KEY = db_key
def set_value_for_server(self, guild_id, value):
raw_dict = db.get(self.DB_KEY)
if raw_dict is None:
dictionary = {}
else:
dictionary = get_dict(raw_dict)
dictionary[str(guild_id)] = value
db.set(self.DB_KEY, json.dumps(dictionary))
def remove_server(self, guild_id: int):
raw_dict = db.get(self.DB_KEY)
if raw_dict is not None:
dictionary = get_dict(raw_dict)
try:
dictionary.pop(str(guild_id))
db.set(self.DB_KEY, json.dumps(dictionary))
except KeyError:
pass
# On Leaving Server
@Cog.listener()
async def on_guild_remove(self, guild: Context.guild):
self.remove_server(guild_id=guild.id)
log_event(f"left the server '{guild}'")
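
# --- Standalone sketch (added for illustration, not part of the cog) ---
# DatabaseHandler above stores one JSON-encoded dict per DB key, mapping guild ids to
# values. A plain in-memory dict stands in for the real `db` client to show the round trip.
def _db_round_trip_demo():
    store = {}                                    # hypothetical stand-in for utils.utils.db
    key, guild_id, value = "example_key", 1234, "some value"
    raw = store.get(key)
    dictionary = json.loads(raw) if raw else {}   # same decode step get_dict() presumably performs
    dictionary[str(guild_id)] = value
    store[key] = json.dumps(dictionary)
    return json.loads(store[key])                 # {'1234': 'some value'}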
|
# Copyright (c) 2011-2020 Eric Froemling
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
"""Functionality related to team games."""
from __future__ import annotations
from typing import TYPE_CHECKING
import _ba
from ba._freeforallsession import FreeForAllSession
from ba._gameactivity import GameActivity
from ba._gameresults import TeamGameResults
from ba._dualteamsession import DualTeamSession
if TYPE_CHECKING:
from typing import Any, Dict, Type, Sequence
from bastd.actor.playerspaz import PlayerSpaz
import ba
class TeamGameActivity(GameActivity):
"""Base class for teams and free-for-all mode games.
Category: Gameplay Classes
(Free-for-all is essentially just a special case where every
ba.Player has their own ba.Team)
"""
@classmethod
def supports_session_type(cls, sessiontype: Type[ba.Session]) -> bool:
"""
Class method override;
returns True for ba.DualTeamSessions and ba.FreeForAllSessions;
False otherwise.
"""
return (issubclass(sessiontype, DualTeamSession)
or issubclass(sessiontype, FreeForAllSession))
def __init__(self, settings: Dict[str, Any]):
super().__init__(settings)
# By default we don't show kill-points in free-for-all.
# (there's usually some activity-specific score and we don't
# wanna confuse things)
if isinstance(_ba.getsession(), FreeForAllSession):
self._show_kill_points = False
def on_transition_in(self) -> None:
# pylint: disable=cyclic-import
from ba._coopsession import CoopSession
from bastd.actor.controlsguide import ControlsGuide
super().on_transition_in()
# On the first game, show the controls UI momentarily.
# (unless we're being run in co-op mode, in which case we leave
# it up to them)
if not isinstance(self.session, CoopSession):
# FIXME: Need an elegant way to store on session.
if not self.session.have_shown_controls_help_overlay:
delay = 4.0
lifespan = 10.0
if self.slow_motion:
lifespan *= 0.3
ControlsGuide(delay=delay,
lifespan=lifespan,
scale=0.8,
position=(380, 200),
bright=True).autoretain()
self.session.have_shown_controls_help_overlay = True
def on_begin(self) -> None:
super().on_begin()
try:
# Award a few achievements.
if isinstance(self.session, FreeForAllSession):
if len(self.players) >= 2:
from ba import _achievement
_achievement.award_local_achievement('Free Loader')
elif isinstance(self.session, DualTeamSession):
if len(self.players) >= 4:
from ba import _achievement
_achievement.award_local_achievement('Team Player')
except Exception:
from ba import _error
_error.print_exception()
def spawn_player_spaz(self,
player: ba.Player,
position: Sequence[float] = None,
angle: float = None) -> PlayerSpaz:
"""
Method override; spawns and wires up a standard ba.PlayerSpaz for
a ba.Player.
If position or angle is not supplied, a default will be chosen based
on the ba.Player and their ba.Team.
"""
if position is None:
# In teams-mode get our team-start-location.
if isinstance(self.session, DualTeamSession):
position = (self.map.get_start_position(player.team.get_id()))
else:
# Otherwise do free-for-all spawn locations.
position = self.map.get_ffa_start_position(self.players)
return super().spawn_player_spaz(player, position, angle)
def end( # type: ignore
self,
results: Any = None,
announce_winning_team: bool = True,
announce_delay: float = 0.1,
force: bool = False) -> None:
"""
End the game and announce the single winning team
unless 'announce_winning_team' is False.
(for results without a single most-important winner).
"""
# pylint: disable=arguments-differ
from ba._coopsession import CoopSession
from ba._multiteamsession import MultiTeamSession
from ba._general import Call
# Announce win (but only for the first finish() call)
# (also don't announce in co-op sessions; we leave that up to them).
session = self.session
if not isinstance(session, CoopSession):
do_announce = not self.has_ended()
super().end(results, delay=2.0 + announce_delay, force=force)
            # Need to do this *after* the end call so that results is valid.
assert isinstance(results, TeamGameResults)
if do_announce and isinstance(session, MultiTeamSession):
session.announce_game_results(
self,
results,
delay=announce_delay,
announce_winning_team=announce_winning_team)
# For co-op we just pass this up the chain with a delay added
# (in most cases). Team games expect a delay for the announce
# portion in teams/ffa mode so this keeps it consistent.
else:
# don't want delay on restarts..
if (isinstance(results, dict) and 'outcome' in results
and results['outcome'] == 'restart'):
delay = 0.0
else:
delay = 2.0
_ba.timer(0.1, Call(_ba.playsound, _ba.getsound('boxingBell')))
super().end(results, delay=delay, force=force)
|
#import libraries
from selenium import webdriver
from selenium.webdriver.common.by import By
import time
from datetime import datetime
import pandas as pd
#path for webdriver
driverpath = "PATH for your chromedriver"
#load data from csv file
df = pd.read_csv("si126_namelist.csv")
urllist = list(df[df.GSX == True].formlink)
namelist = list(df[df.GSX == True].nickname)
#sending mail merge
for i in range(len(urllist)):
#rest time from previous session
driver = webdriver.Chrome(driverpath)
time.sleep(3)
    driver.get(urllist[i])
send_to = namelist[i]
time.sleep(1)
sender_txt = "@sikawit"
greeting_txt = f"""Hi {send_to.strip()}!
ยินดีด้วยครับคุณหมอ ในที่สุดก็เดินทางมาถึงเส้นชัยที่ยากที่สุดทางหนึ่งละครับ (ซึ่งผมขอหนีไปก่อน 555) ขอให้หมอเป็นหมอที่ดีครับ หวังว่าคงได้เจอกัน (คงไม่ใช่ในฐานะคนไข้นะ) หากมีอะไรที่ให้ช่วยได้ก็บอกมาได้ครัชช
ยินดีอีกครั้งครับ
Sake
*****
Generated from a bot on {datetime.now().astimezone().strftime("%Y-%m-%d %H:%M:%S UTC%Z")}
Find out more at https://github.com/sikawit/FarewellSI126"""
sender_fill = driver.find_element_by_xpath('/html/body/div/div[2]/form/div[2]/div/div[2]/div[1]/div/div/div[2]/div/div[1]/div/div[1]/input')
sender_fill.send_keys(sender_txt)
greeting_fill = driver.find_element_by_xpath('/html/body/div/div[2]/form/div[2]/div/div[2]/div[2]/div/div/div[2]/div/div[1]/div[2]/textarea')
greeting_fill.send_keys(greeting_txt)
submit = driver.find_element_by_xpath('/html/body/div/div[2]/form/div[2]/div/div[3]/div[1]/div/div/span')
submit.click()
time.sleep(3)
driver.close()
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Hook for Kafka"""
import re
from typing import Any, Optional, Dict
from kafka import KafkaConsumer
from kafka.errors import KafkaError
import json
from sqlalchemy.sql.sqltypes import Boolean
from airflow.exceptions import AirflowException, AirflowConfigException
from airflow.hooks.base import BaseHook
auth_type_options = [
'PLAIN',
'OAUTHBEARER',
'SCRAM-SHA-256',
'SCRAM-SHA-512',
]
class KafkaHook(BaseHook):
"""
Kafka interaction hook, a Wrapper around Kafka Python SDK.
:param kafka_conn_id: reference to a pre-defined Kafka Connection
:type kafka_conn_id: str
"""
default_conn_name = 'kafka_default'
conn_type = "kafka"
conn_name_attr = "kafka_conn_id"
hook_name = "Kafka"
def __getattribute__(self, name: str) -> Any:
try:
return super(KafkaHook, self).__getattribute__(name)
except Exception:
return None
@staticmethod
def get_ui_field_behaviour() -> Dict:
"""Returns custom field behaviour"""
return {
"hidden_fields": ['port'],
"relabeling": {
'host': 'bootstrap servers',
'login': 'user',
'schema': 'topic',
},
}
def __init__(self, kafka_conn_id: str = default_conn_name) -> None:
super().__init__()
self.kafka_conn_id = kafka_conn_id
self.client = None
# self.get_conn()
self._create_consumer_config()
def _get_connection_default_config(self):
extra_options = {}
if not self.kafka_conn_id:
raise AirflowException(
'Failed to create Kafka client. no kafka_conn_id provided')
conn = self.get_connection(self.kafka_conn_id)
if conn.extra is not None:
extra_options = conn.extra_dejson
self._topic = extra_options.get('topic', '')
self._group_id = extra_options.get('group_id', '')
self._servers = extra_options.get('bootstrap_servers', '')
self._auth_type = extra_options.get('auth_type', '')
if not self._auth_type:
            self.log.info('Kafka consumer auth type is empty')
self._user = extra_options.get('user', '')
self._password = extra_options.get('password', '')
self._security_protocol = extra_options.get(
'security_protocol', '')
def _create_consumer_config(self) -> Dict:
self._get_connection_default_config()
self.log.info(u'Create Kafka Consume Server:{}, Topic: {}, Group: {}'.format(
self.servers, self.topic, self.group_id))
consumer_config = {
'group_id': self.group_id,
'bootstrap_servers': self.servers,
            'auto_offset_reset': 'earliest',  # start from the earliest available offset
            # messages are expected to be JSON payloads
'value_deserializer': lambda data: json.loads(data),
}
if self._auth_type:
self.consumer_add_auth_config(consumer_config)
self._consumer_config = consumer_config
return consumer_config
def update_consumer_config(self, key: str, val: Any):
if not self._consumer_config:
return
        self._consumer_config.update({key: val})
def _create_consumer(self) -> KafkaConsumer:
consumer_config = self._consumer_config
if not consumer_config:
            raise AirflowConfigException('Kafka consumer configuration must be set first')
consumer: KafkaConsumer = KafkaConsumer(self.topic, **consumer_config)
return consumer
def ensure_consumer(self) -> Optional[KafkaConsumer]:
try:
if self.client:
return self.client
self.validate_consumer_configuration()
            self.client = self._create_consumer()
            return self.client
except Exception as e:
self.log.error(e)
raise e
def consumer_add_auth_config(self, config) -> Dict:
if not config:
return config
if self._auth_type == auth_type_options[0]:
# PLAIN
if self._security_protocol not in ['SASL_PLAINTEXT', 'SASL_SSL']:
                raise AirflowConfigException('Invalid security_protocol for PLAIN authentication')
config.update({
'security_protocol': self._security_protocol,
'sasl_mechanism': auth_type_options[0],
'sasl_plain_username': self._user,
'sasl_plain_password': self._password,
})
if self._auth_type in [auth_type_options[2], auth_type_options[3]]:
# SCRAM-SHA-256 or SCRAM-SHA-512
if self._security_protocol not in ['SASL_PLAINTEXT', 'SASL_SSL']:
                raise AirflowConfigException(
                    'Invalid security_protocol for {} authentication'.format(self._auth_type))
config.update({
'security_protocol': self._security_protocol,
'sasl_mechanism': self._auth_type,
'sasl_plain_username': self._user,
'sasl_plain_password': self._password,
})
return config
def validate_consumer_configuration(self):
if not self.topic:
            raise AirflowConfigException('Kafka consumer topic is empty')
if not self.group_id:
            raise AirflowConfigException('Kafka consumer group_id is empty')
if not self.servers:
            raise AirflowConfigException('Kafka consumer bootstrap servers are empty')
return True
@property
def servers(self):
return self._servers
@property
def topic(self):
return self._topic
@property
def group_id(self):
return self._group_id
def unsubscribe(self):
client = self.get_conn()
return client.unsubscribe()
def close(self):
client = self.get_conn()
return client.close()
def bootstrap_connected(self):
client = self.get_conn()
return client.bootstrap_connected()
def get_conn(self) -> KafkaConsumer:
return self.ensure_consumer()
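# Hedged usage sketch (not part of the original module). It assumes an Airflow
# connection whose Extra JSON provides topic, group_id and bootstrap_servers:
#
#   hook = KafkaHook(kafka_conn_id='kafka_default')
#   consumer = hook.get_conn()
#   for message in consumer:
#       print(message.value)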
|
import math
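# samples(n) below evaluates n*(ln n + 0.577216) + 1/2, the coupon-collector
# estimate of random draws needed to observe all n distinct outcomes.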
def samples(n):
return int(n * (math.log(n) + 0.577216) + 1.0/2.0)
experiment_name = 'final-4-AdaMax-restarts-learned'
circuit_generator_script = 'random_circuit.py'
# parameters to be tested
number_of_qubits = [4]
n = 4
number_of_cycles = [5, 10, 15, 20]
number_of_circuits = 20 #number of random circuits with same number of qubits and cycles
number_of_nodes = [1]
number_of_tasks_per_node = [1]
number_of_omp_threads = [1]
number_of_training_samples = sorted([samples(n), samples(n**2), samples(0.95* 2**n), samples(0.9 * 2**n), samples(0.85 * 2**n), samples(n**3)])
number_of_training_iterations = [1000, 10000, 100000]
number_of_initial_hidden_units = [int(q*(q-1)/2.0) for q in number_of_qubits]
number_of_sample_steps = [0]  # previously: [q if q % 2 != 0 else q + 1 for q in number_of_qubits]
number_of_runs = 5 #number of runs for a specific circuit
randomRestarts = 5
earlyStopping = True
# AdaDelta, AdaGrad, AdaMax, AMSGrad, Momentum, RMSProp, Sgd, StochasticReconfiguration
optimizer = 'AdaMax'
learnCZ = True
|
from Jumpscale import j
import random, requests, uuid
import subprocess, uuid
skip = j.baseclasses.testtools._skip
@skip("https://github.com/threefoldtech/jumpscaleX_builders/issues/50")
def before_all():
pass
def random_string():
return str(uuid.uuid4())[:10]
def info(message):
j.tools.logger._log_info(message)
def set_database_data(database):
database.name = str(uuid.uuid4()).replace("-", "")[1:10]
database.admin_email = "{}@example.com".format(database.name)
database.admin_passwd_ = random_string()
def before():
info("Install odoo server , and get new instance of it ")
j.servers.odoo.install()
odoo_server.start()
def after():
info(" Stop odoo server.")
odoo_server.stop()
odoo_server = j.servers.odoo.get()
def test_01_create_database():
"""
- Install and start odoo server , and get new instance of it .
- Create new database.
- Check that created database exist in databases_list.
- Check that new data base created successfully.
- stop odoo server.
"""
info("Create new database ")
database = odoo_server.databases.new()
set_database_data(database)
odoo_server.databases_create()
odoo_server.save()
info("Check that created database exist in databases_list.")
databases = odoo_server.databases_list()
assert database.name in databases
info("Check that new database created successfully.")
database_client = odoo_server.client_get(database.name)
user_name = random_string()
user_password = random_string()
database_client.user_add(user_name, user_password)
database_client.login(user_name, user_password)
    wrong_password = random_string()
    try:
        database_client.login(user_name, wrong_password)
    except Exception as e:
        info("error raised {}".format(e))
    else:
        raise AssertionError("error should be raised")
database_client.user_delete(user_name, user_password)
    try:
        database_client.login(user_name, user_password)
    except Exception as e:
        info("error raised {}".format(e))
    else:
        raise AssertionError("error should be raised")
info(" stop odoo server.")
odoo_server.stop()
def test02_create_databases():
"""
- Install and start odoo server , and get new instance of it .
- Create database [db1].
- Create second database [db2] with reset=false, should create another database only..
- Create another database [db3] with reset =true, should delete all old databases and create another one.
"""
info("Create database [db1].")
db1 = odoo_server.databases.new()
set_database_data(db1)
odoo_server.databases_create()
odoo_server.save()
info("Create second database [db2] with reset=false, should create another database only.")
db2 = odoo_server.databases.new()
set_database_data(db2)
odoo_server.databases_create(reset=False)
odoo_server.save()
assert db1.name in odoo_server.databases_list()
assert db2.name in odoo_server.databases_list()
info("Create another database [db3] with reset =true, should delete all old databases and create another one.")
db3 = odoo_server.databases.new()
set_database_data(db3)
odoo_server.databases_create(reset=True)
odoo_server.save()
assert db1.name not in odoo_server.databases_list()
assert db2.name not in odoo_server.databases_list()
assert db3.name in odoo_server.databases_list()
def test03_reset_databases():
"""
- Install and start odoo server , and get new instance of it .
- Try reset_database, should delete all databases.
"""
info("Create database.")
db = odoo_server.databases.new()
set_database_data(db)
odoo_server.databases_create()
odoo_server.save()
info("Try reset_database, should delete all databases.")
odoo_server.databases_reset()
assert odoo_server.databases_list() == []
def test04_export_import_databases():
"""
- Install and start odoo server , and get new instance of it .
- Export created database, check that zip file exist.
- Import database, check that imported database exist in database list
"""
info("Create database.")
db = odoo_server.databases.new()
set_database_data(db)
odoo_server.databases_create()
odoo_server.save()
info("Export created database, check that zip file exist.")
export_dir = "/root/exports/"
result = j.sal.process.execute("mkdir {}".format(export_dir))
odoo_server.database_export(db.name, export_dir)
result = j.sal.process.execute(" ls /root/exports")
assert "{}.zip".format(db.name) in result[1]
info("Import database, check that imported database exist in database list")
odoo_server.databases_reset()
odoo_server.database_import(db.name, export_dir)
assert db.name in odoo_server.databases_list()
def test05_write_and_read():
"""
- Install and start odoo server , and get new instance of it .
- Create database [db].
    - Write data [dt] in [db], check that it is written successfully.
- Export data [dt].
- Import data [dt].
- Read data [dt] from db [db].
- Delete data [dt], check it deleted successfully.
"""
pass
|
import json
import os
import logging
import base64
import urllib
from json import JSONDecodeError
from flask import redirect
from flask import render_template
from flask import request
from flask import session
from pajbot.managers.db import DBManager
from pajbot.managers.redis import RedisManager
from pajbot.models.user import User
log = logging.getLogger(__name__)
def init(app):
def twitch_login(scopes):
csrf_token = base64.b64encode(os.urandom(64)).decode("utf-8")
session["csrf_token"] = csrf_token
state = {"csrf_token": csrf_token, "return_to": request.args.get("returnTo", None)}
params = {
"client_id": app.bot_config["twitchapi"]["client_id"],
"redirect_uri": app.bot_config["twitchapi"]["redirect_uri"],
"response_type": "code",
"scope": " ".join(scopes),
"state": json.dumps(state),
}
authorize_url = "https://id.twitch.tv/oauth2/authorize?" + urllib.parse.urlencode(params)
return redirect(authorize_url)
bot_scopes = [
"user_read",
"user:edit",
"user:read:email",
"channel:moderate",
"chat:edit",
"chat:read",
"whispers:read",
"whispers:edit",
"channel_editor",
"channel:read:subscriptions",
]
streamer_scopes = ["channel:read:subscriptions"]
@app.route("/login")
def login():
return twitch_login(scopes=[])
@app.route("/bot_login")
def bot_login():
return twitch_login(scopes=bot_scopes)
@app.route("/streamer_login")
def streamer_login():
return twitch_login(scopes=streamer_scopes)
@app.route("/login/error")
def login_error():
return render_template("login_error.html")
@app.route("/login/authorized")
def authorized():
# First, validate state with CSRF token
# (CSRF token from request parameter must match token from session)
state_str = request.args.get("state", None)
if state_str is None:
return render_template("login_error.html", return_to="/", detail_msg="State parameter missing"), 400
try:
state = json.loads(state_str)
except JSONDecodeError:
return render_template("login_error.html", return_to="/", detail_msg="State parameter not valid JSON"), 400
# we now have a valid state object, we can send the user back to the place they came from
return_to = state.get("return_to", None)
if return_to is None:
# either not present in the JSON at all, or { "return_to": null } (which is the case when you
# e.g. access /bot_login or /streamer_login directly)
return_to = "/"
def login_error(code, detail_msg=None):
return render_template("login_error.html", return_to=return_to, detail_msg=detail_msg), code
csrf_token = state.get("csrf_token", None)
if csrf_token is None:
return login_error(400, "CSRF token missing from state")
csrf_token_in_session = session.pop("csrf_token", None)
if csrf_token_in_session is None:
return login_error(400, "No CSRF token in session cookie")
if csrf_token != csrf_token_in_session:
return login_error(403, "CSRF tokens don't match")
# determine if we got ?code= or ?error= (success or not)
# https://tools.ietf.org/html/rfc6749#section-4.1.2
if "error" in request.args:
# user was sent back with an error condition
error_code = request.args["error"]
optional_error_description = request.args.get("error_description", None)
if optional_error_description is not None:
user_detail_msg = f"Error returned from Twitch: {optional_error_description} (code: {error_code}"
else:
user_detail_msg = f"Error returned from Twitch (code: {error_code})"
return login_error(400, user_detail_msg)
if "code" not in request.args:
return login_error(400, "No ?code or ?error present on the request")
# successful authorization
code = request.args["code"]
try:
# gets us an UserAccessToken object
access_token = app.twitch_id_api.get_user_access_token(code)
        except Exception:
log.exception("Could not exchange given code for access token with Twitch")
return login_error(500, "Could not exchange the given code for an access token.")
user_basics = app.twitch_helix_api.fetch_user_basics_from_authorization(
(app.api_client_credentials, access_token)
)
with DBManager.create_session_scope(expire_on_commit=False) as db_session:
me = User.from_basics(db_session, user_basics)
session["user"] = me.jsonify()
# bot login
if me.login == app.bot_config["main"]["nickname"].lower():
redis = RedisManager.get()
redis.set(f"authentication:user-access-token:{me.id}", json.dumps(access_token.jsonify()))
log.info("Successfully updated bot token in redis")
# streamer login
if me.login == app.bot_config["main"]["streamer"].lower():
# there's a good chance the streamer will later log in using the normal login button.
                # we only update their access token if the returned scope contains the special scopes requested
# in /streamer_login
                # We use < to say "if the granted scope is a proper subset of the required scopes"; this can be the case
# for example when the bot is running in its own channel and you use /bot_login,
# then the granted scopes will be a superset of the scopes needed for the streamer.
# By doing this, both the streamer and bot token will be set if you complete /bot_login with the bot
# account, and if the bot is running in its own channel.
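                # e.g. with a plain /login (no scopes granted), set([]) < set(streamer_scopes) is True,
                # so the streamer token is left untouched.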
if set(access_token.scope) < set(streamer_scopes):
log.info("Streamer logged in but not all scopes present, will not update streamer token")
else:
redis = RedisManager.get()
redis.set(f"authentication:user-access-token:{me.id}", json.dumps(access_token.jsonify()))
log.info("Successfully updated streamer token in redis")
return redirect(return_to)
@app.route("/logout")
def logout():
session.pop("user", None)
return_to = request.args.get("returnTo", "/")
if return_to.startswith("/admin"):
return_to = "/"
return redirect(return_to)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 22 15:37:02 2018
@author: patrickmcfarlane
test_playbyplay.py
This function contains the tests for
functions in the playbyplay.py file
"""
from .__init__ import HEADERS
from ..playbyplay import PlayByPlay
def test_playbyplay():
""" tests the playbyplay endpoint of the PlayByPlay class
"""
example_pbp = PlayByPlay(headers=HEADERS,
game_id='0021500002')
table_names = example_pbp.data.keys()
assert 'PlayByPlay' in table_names
assert 'AvailableVideo' in table_names
example_game = example_pbp.data['PlayByPlay'][0]
example_video = example_pbp.data['AvailableVideo'][0]
assert list(example_game.keys()) == ['GAME_ID',
'EVENTNUM',
'EVENTMSGTYPE',
'EVENTMSGACTIONTYPE',
'PERIOD',
'WCTIMESTRING',
'PCTIMESTRING',
'HOMEDESCRIPTION',
'NEUTRALDESCRIPTION',
'VISITORDESCRIPTION',
'SCORE',
'SCOREMARGIN']
assert list(example_video.keys()) == ['VIDEO_AVAILABLE_FLAG']
def test_playbyplayv2():
""" tests the playbyplayv2 endpoint of the PlayByPlay class
"""
example_pbp = PlayByPlay(headers=HEADERS,
endpoint='playbyplayv2',
game_id='0021500002')
table_names = example_pbp.data.keys()
assert 'PlayByPlay' in table_names
assert 'AvailableVideo' in table_names
example_game = example_pbp.data['PlayByPlay'][0]
example_video = example_pbp.data['AvailableVideo'][0]
assert list(example_game.keys()) == ['GAME_ID',
'EVENTNUM',
'EVENTMSGTYPE',
'EVENTMSGACTIONTYPE',
'PERIOD',
'WCTIMESTRING',
'PCTIMESTRING',
'HOMEDESCRIPTION',
'NEUTRALDESCRIPTION',
'VISITORDESCRIPTION',
'SCORE',
'SCOREMARGIN',
'PERSON1TYPE',
'PLAYER1_ID',
'PLAYER1_NAME',
'PLAYER1_TEAM_ID',
'PLAYER1_TEAM_CITY',
'PLAYER1_TEAM_NICKNAME',
'PLAYER1_TEAM_ABBREVIATION',
'PERSON2TYPE',
'PLAYER2_ID',
'PLAYER2_NAME',
'PLAYER2_TEAM_ID',
'PLAYER2_TEAM_CITY',
'PLAYER2_TEAM_NICKNAME',
'PLAYER2_TEAM_ABBREVIATION',
'PERSON3TYPE',
'PLAYER3_ID',
'PLAYER3_NAME',
'PLAYER3_TEAM_ID',
'PLAYER3_TEAM_CITY',
'PLAYER3_TEAM_NICKNAME',
'PLAYER3_TEAM_ABBREVIATION']
assert list(example_video.keys()) == ['VIDEO_AVAILABLE_FLAG']
|
from django.conf.urls import patterns, include, url
from views import DashboardView
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^/$', DashboardView.as_view(), name="dashboard"),
)
|
from .json_response import JSON
from .effects import Image
from .overlays import Overlay
from .media import Gif
from .generators import Generator
from .generators import Generator
class Weeby:
def __init__(self, token: str) -> None:
self.token = token
def get_json_response(self) -> JSON:
return JSON(self.token)
def apply_effect(self) -> Image:
return Image(self.token)
def set_overlay(self) -> Overlay:
return Overlay(self.token)
def get_gif(self) -> Gif:
return Gif(self.token)
def generate(self) -> Generator:
return Generator(self.token)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .eval_hooks import MyDistEvalHook, MyEvalHook
from .eval_metrics import (calculate_confusion_matrix, f1_score, precision,
precision_recall_f1, recall, support, class_accuracy,
CCC_score)
from .mean_ap import average_precision, mAP
from .multilabel_eval_metrics import average_performance
__all__ = [
'MyDistEvalHook', 'MyEvalHook', 'precision', 'recall', 'f1_score', 'support',
'average_precision', 'mAP', 'average_performance',
'calculate_confusion_matrix', 'precision_recall_f1', 'class_accuracy',
'CCC_score'
]
|
#!/usr/bin/env python3
# coding=utf-8
"""Ubiquiti Networks Discovery Protocol Tool"""
import socket
from typing import List, Tuple
from platform import platform
from uuid import getnode as get_mac
from time import time
from struct import pack
from UbntTLV import UbntTLV
from UbntTuple import lookup_tlv_type, UbntTuple
from UbntLogging import l, e, d, set_debugging
import getopt
import sys
SOCKET = None
GLOBAL_TIMEOUT = 20
DISPLAY_MODE = 'edge' # 'edge', 'oneline', 'everything'
def print_one_line(packet: UbntTLV) -> None:
"""Print a discovery as one line for column view"""
ipv4: str = ''
model: str = ''
hostname: str = ''
hwaddr: str = ''
for t in packet.TLVs:
cur_type = t.Type
if cur_type == 0x0b: # hostname
hostname = t.value_to_str()
elif cur_type == 0x0c: # model name
model = t.value_to_str()
elif cur_type == 0x02: # 'MAC address and IP address'
ipv4 = t.value_to_str().split('ipv4: ')[1]
elif cur_type == 0x01: # 'MAC address'
hwaddr = t.value_to_str()
l('{:17} {:15} {:10} \''.format(hwaddr, ipv4, model) + hostname + '\'')
def print_everything(packet: UbntTLV) -> None:
"""Most verbose output for discovery packets"""
for t in packet.TLVs:
l('{:26}: {:}'.format(lookup_tlv_type(t.Type), t.value_to_str()))
def print_edge_detail_style(packet: UbntTLV) -> None:
"""Print a discovery result mimicking ERs show ubnt discovery detail output"""
hostname: str = ''
hwaddr: str = ''
ipv4: str = ''
product: str = ''
fwversion: str = ''
uptime: str = ''
addresses: str = ''
for t in packet.TLVs:
cur_type: int = t.Type
if cur_type == 0x0b: # hostname
hostname = t.value_to_str()
elif cur_type == 0x0c: # model name
product = t.value_to_str()
elif cur_type == 0x02: # 'MAC address and IP address'
addresses += '\n ' + t.value_to_str()
ipv4 = t.value_to_str().split('ipv4: ')[1]
elif cur_type == 0x01: # 'MAC address'
hwaddr = t.value_to_str()
elif cur_type == 0x0a: # 'uptime'
uptime = t.value_to_str()
elif cur_type == 0x03: # 'firmware version'
fwversion = t.value_to_str()
l('hostname: ' + hostname)
l('hwaddr: ' + hwaddr)
l('ipv4: ' + ipv4)
l('product: ' + product)
l('fwversion: ' + fwversion)
l('uptime: ' + uptime)
l('addresses:' + addresses)
def init_socket() -> None:
"""Initialize the socket"""
try:
global SOCKET, GLOBAL_TIMEOUT
d('Creating socket...')
SOCKET = socket.socket(type=socket.SOCK_DGRAM)
d('Making socket broadcast capable...')
SOCKET.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
d('Binding socket to port 10001...')
SOCKET.bind(('', 10001))
if GLOBAL_TIMEOUT == 0:
d('No timeout.')
else:
d('Set timeout to ' + str(GLOBAL_TIMEOUT) + 's...')
SOCKET.settimeout(GLOBAL_TIMEOUT)
d('Socket setup done.')
except OSError as r:
e('Could not create socket: ' + str(r))
exit(-1)
except Exception as ex:
e('Error during socket setup: ' + str(ex))
exit(-1)
def create_answer_packet():
"""Creates a legit packet for discovery"""
my_hostname: str = socket.gethostname()
my_uptime: int = int(time() - BOOT_TIME)
d('Hostname is ' + str(my_hostname))
d('Uptime is ' + str(my_uptime))
result: UbntTLV = UbntTLV()
result.Opcode = 0
result.Version = 1
data: UbntTuple = UbntTuple()
data.Type = 0x0a
data.Value = pack('!I', my_uptime)
result.TLVs.append(data)
data: UbntTuple = UbntTuple()
data.Type = 0x0b
data.Value = my_hostname.encode(encoding='iso-8859-1')
result.TLVs.append(data)
data: UbntTuple = UbntTuple()
data.Type = 0x01
data.Value = get_mac().to_bytes(length=6, byteorder='big')
result.TLVs.append(data)
data: UbntTuple = UbntTuple()
data.Type = 0x02
data.Value = get_mac().to_bytes(length=6, byteorder='big') + \
b''.join(map(lambda x: int(x).to_bytes(length=1, byteorder='big'),
socket.gethostbyname(socket.gethostname()).split('.')))
result.TLVs.append(data)
data: UbntTuple = UbntTuple()
data.Type = 0x03
data.Value = platform().encode(encoding='iso-8859-1')
result.TLVs.append(data)
d('Prepared for outgoing:')
d(str(result))
return result
def server():
"""Main server function"""
global GLOBAL_TIMEOUT
timeout: int = time() + GLOBAL_TIMEOUT
try:
while True:
if GLOBAL_TIMEOUT != 0 and time() >= timeout:
l('Timeout reached, exiting.')
break
try:
data, came_from = SOCKET.recvfrom(2048)
except socket.timeout:
d('Timeout reached')
break
except Exception as ex:
                e('Could not receive incoming packets: ' + str(ex))
                continue
d('Incoming packet from ' + str(came_from))
parsed_packet: UbntTLV = None
try:
parsed_packet = UbntTLV(data)
except Exception as r:
e('Malformed packet: ' + str(r))
if parsed_packet is not None:
d('Received version ' + str(parsed_packet.Version) + ', opcode ' + str(parsed_packet.Opcode))
if parsed_packet.Version == 1 and parsed_packet.Opcode == 0:
l('Received query from ' + str(came_from) + '. Answering.')
answer: UbntTLV = create_answer_packet()
SOCKET.sendto(answer.to_byte_array(), came_from)
else:
d('Received non discovery request packet: \n' + str(parsed_packet))
else:
d('Received malformed packet: ' + str(data))
except KeyboardInterrupt:
l('Goodbye')
except Exception as err:
e('Uncaught exception in server mode: ' + str(err))
exit(-1)
def client() -> None:
"""Main client function"""
global GLOBAL_TIMEOUT, DISPLAY_MODE
discovery_request: UbntTLV = UbntTLV()
d('Sending a discovery request to broadcast.')
SOCKET.sendto(discovery_request.to_byte_array(), ('255.255.255.255', 10001))
d('Sent.')
d('Sending a discovery request to multicast.')
SOCKET.sendto(discovery_request.to_byte_array(), ('233.89.188.1', 10001))
d('Sent.')
received_packets: List[UbntTLV] = []
timeout: int = time() + GLOBAL_TIMEOUT
while True:
data: bytearray = None
came_from: Tuple[str, int] = ('', 0)
try:
data, came_from = SOCKET.recvfrom(2048)
except socket.timeout:
d('Timeout reached')
break
except Exception as ex:
            e('Could not receive incoming packets: ' + str(ex))
            continue
d('Incoming packet from ' + str(came_from))
parsed_packet: UbntTLV = None
try:
parsed_packet = UbntTLV(data)
except Exception as r:
e('Malformed packet: ' + str(r))
if parsed_packet is not None:
d('Received version ' + str(parsed_packet.Version) + ', opcode ' + str(parsed_packet.Opcode))
if parsed_packet.Opcode in (0, 6) and len(parsed_packet.TLVs) > 2:
received_packets.append(parsed_packet)
else:
d('Received non discovery response packet: \n' + str(parsed_packet))
else:
d('Received malformed packet: ' + str(data))
if timeout < time():
d('Timeout reached. Exiting loop.')
break
received_unique_packets: List[UbntTLV] = []
for i in received_packets:
found: bool = False
for i2 in received_unique_packets:
if i.identifier() == i2.identifier():
d('Found duplicate announcement.')
found = True
break
if not found:
received_unique_packets.append(i)
l('Discovered ' + str(len(received_unique_packets)) + ' devices:')
if DISPLAY_MODE == 'edge':
l('----------------------------------------------------')
for unique_device in received_unique_packets:
print_edge_detail_style(unique_device)
l('----------------------------------------------------')
l('')
elif DISPLAY_MODE == 'oneline':
l('{:17} {:15} {:10} '.format('Hardware Address', 'IP address', 'Model') + 'hostname')
for unique_device in received_unique_packets:
print_one_line(unique_device)
elif DISPLAY_MODE == 'everything':
l('----------------------------------------------------')
for unique_device in received_unique_packets:
print_everything(unique_device)
l('----------------------------------------------------')
def usage() -> None:
"""Show usage"""
l('Ubiquiti Discovery Tool\n -h Help\n -m Display mode: oneline, everything, edge\n' +
' -v Verbose\n -s Server mode\n -c Client mode\n -B Seconds since boot for server mode')
if __name__ == '__main__':
opts: Tuple = None
try:
opts, args = getopt.getopt(sys.argv[1:], "scB:hvt:m:")
except getopt.GetoptError:
e('Wrong usage!')
exit(-1)
set_debugging(False)
DISPLAY_MODE = None
MODE = None
GLOBAL_TIMEOUT = None
BOOT_TIME = None
for o, a in opts:
if o == '-v':
set_debugging(True)
d('Debugging enabled')
elif o == '-m':
if DISPLAY_MODE is not None or a not in ('edge', 'everything', 'oneline'):
e('Display mode must be EITHER edge, everything or oneline.')
exit(-1)
DISPLAY_MODE = a
d('Display mode set to ' + a)
elif o == '-t':
if not str(a).isdigit():
e('Timeout must be a number.')
exit(-1)
else:
GLOBAL_TIMEOUT = int(a)
d('Timeout set to ' + a)
elif o == '-h':
usage()
exit(0)
elif o == '-s':
if MODE is not None:
e('Mode must be EITHER client or server.')
exit(-1)
else:
MODE = 'server'
elif o == '-c':
if MODE is not None:
e('Mode must be EITHER client or server.')
exit(-1)
else:
MODE = 'client'
elif o == '-B':
if not str(a).isdigit():
e('Boot time must be a number.')
exit(-1)
else:
BOOT_TIME = int(a)
d('Boot time set to ' + a)
if GLOBAL_TIMEOUT is None:
GLOBAL_TIMEOUT = 11
d('Defaulting timeout to ' + str(GLOBAL_TIMEOUT))
if DISPLAY_MODE is None:
DISPLAY_MODE = 'oneline'
d('Defaulting to display mode ' + DISPLAY_MODE)
if MODE is None:
MODE = 'client'
d('Defaulting to mode ' + MODE)
init_socket()
if MODE == 'client':
if GLOBAL_TIMEOUT == 0:
e('Timeout of 0 is invalid for client mode.')
exit(-1)
d('Launching client mode.')
client()
elif MODE == 'server':
if BOOT_TIME is None:
d('Saving current time as server boot time.')
BOOT_TIME = time()
d('Boot time is ' + str(BOOT_TIME))
d('Launching server mode.')
server()
|
# -*- coding: utf-8 -*-
{
'author': u'Blanco Martín & Asociados',
'category': 'Localization/Chile',
'depends': ['l10n_cl_invoice'],
"external_dependencies": {
'python': [
'xmltodict',
'base64'
]
},
'description': u'''\n\nDTE CAF File Data Model\n\n''',
'installable': True,
'license': 'AGPL-3',
'name': 'CAF Container for DTE Compliance',
'test': [],
'data': [
'views/dte_caf.xml',
'security/ir.model.access.csv',
],
'update_xml': [],
'version': '0.0',
'website': 'http://blancomartin.cl',
    'auto_install': False,
'active': False
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
import click
import os
import json
from jinja2 import Environment, FileSystemLoader
from .phlexstructure import TreeStructure, PageData, split_path
from .phlexparsers import YAMLDownParser
import sys
PHLEX_VERSION = '1.0.0'
@click.command()
@click.option('--config', '-c', default=None, help='Path to configuration file')
@click.option('--source', '-s', default='{}'.format(os.path.join('src', 'pages')), help='Path to source page files')
@click.option('--templates', '-t', default='{}'.format(os.path.join('src', 'templates')), help='Path to template files')
@click.option('--default-template', '-T', default=None, help='Name of default template to use')
@click.option('--output', '-o', default='{}'.format(os.path.join('dist')), help='Path to put completed files')
@click.option('--version', is_flag=True)
def main(config, source, templates, default_template, output, version):
"""Flexible static HTML builder"""
if version:
print("Phlex version {}".format(PHLEX_VERSION))
quit()
settings = {
"PAGES": source,
"TEMPLATES": templates,
"DEFAULT_TEMPLATE": default_template,
"OUTPUT": output
}
if config and os.path.exists(config):
with open(config, 'r') as settings_json_file:
setting_file = json.loads(settings_json_file.read())
for key, value in setting_file.items():
settings[key] = value
page_parsers = {
'.yd': YAMLDownParser
}
tree = TreeStructure(settings['PAGES'])
tree.crawl()
if not os.path.exists(settings['OUTPUT']):
os.makedirs(settings['OUTPUT'])
env = Environment(
loader=FileSystemLoader(settings['TEMPLATES']))
for page in tree.pages():
parser = page_parsers[page.file_type](page, tree)
page.assign_parser(page_parsers[page.file_type], tree)
with click.progressbar(tree.pages()) as bar:
for page in bar:
page.parser.build_page()
path = list(page.path)
del path[-1]
path.insert(0, settings['OUTPUT'])
path.append(page.filename + '.html')
# get template
template = env.get_template(page.context['template'] + '.html')
# template.globals['context'] = get_context
# template.globals['callable'] = callable
# render
page_output = template.render(**page.context, body=page.body)
# path safety: build the path to the page if it does not exist
if not os.path.exists(os.path.join(*path[0:-1])):
os.makedirs(os.path.join(*path[0:-1]))
# save to file
output_file_name = os.path.join(*path)
with open(output_file_name, 'w') as write_page:
write_page.write(page_output)
|
n = int(input('Enter the first term of the AP: '))
razao = int(input('Enter the common difference of the AP: '))
ultimo = n + (10 - 1) * razao
print('The first 10 terms of the AP are: ', end='')
for i in range(n, razao+ultimo, razao):
print('{} '.format(i), end=' - ')
|
import asyncio
import socket
clients = []
async def handle_client(client, address):
data = f"Please welcome our new chat member from {address}"
for _client in clients:
await loop.sock_sendall(_client, data.encode('utf8'))
while data != 'BYEBYE':
data = (await loop.sock_recv(client, 1024)).decode('utf8')
message = f'From {address}: ' + str(data)
if data == 'BYEBYE':
message = f'User {address} has left the chat'
for _client in clients:
if _client == client:
continue
await loop.sock_sendall(_client, message.encode('utf8'))
clients.remove(client)
print(f'Client: {address} disconnected')
client.close()
async def run_server():
while True:
client, address = await loop.sock_accept(server)
if client not in clients:
clients.append(client)
print(f'Client with address {address} connected to the chat!')
loop.create_task(handle_client(client, address))
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(('localhost', 1337))
server.listen(8)
server.setblocking(False)
loop = asyncio.get_event_loop()
loop.run_until_complete(run_server())
|
#!/usr/bin/env python
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for the pubsubhubbub_publish module."""
__author__ = 'bslatkin@gmail.com (Brett Slatkin)'
import BaseHTTPServer
import urllib
import unittest
import threading
import pubsubhubbub_publish
REQUESTS = 0
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_POST(self):
global REQUESTS
print 'Accessed', self.path
REQUESTS += 1
length = int(self.headers.get('content-length', 0))
if not length:
return self.send_error(500)
body = self.rfile.read(length)
if self.path == '/single':
if body != urllib.urlencode(
{'hub.url': 'http://example.com/feed', 'hub.mode': 'publish'}):
self.send_error(500)
self.wfile.write('Bad body. Found:')
self.wfile.write(body)
else:
self.send_response(204)
elif self.path == '/multiple':
if body != urllib.urlencode(
{'hub.url': ['http://example.com/feed',
'http://example.com/feed2',
'http://example.com/feed3'],
'hub.mode': 'publish'}, doseq=True):
self.send_error(500)
self.wfile.write('Bad body. Found:')
self.wfile.write(body)
else:
self.send_response(204)
elif self.path == '/batch':
self.send_response(204)
elif self.path == '/fail':
self.send_error(400)
self.wfile.write('bad argument')
else:
self.send_error(404)
class PublishTest(unittest.TestCase):
def setUp(self):
global REQUESTS
REQUESTS = 0
self.server = BaseHTTPServer.HTTPServer(('', 0), RequestHandler)
t = threading.Thread(target=self.server.serve_forever)
t.setDaemon(True)
t.start()
self.hub = 'http://%s:%d' % (
self.server.server_name, self.server.server_port)
self.feed = 'http://example.com/feed'
self.feed2 = 'http://example.com/feed2'
self.feed3 = 'http://example.com/feed3'
def testSingle(self):
pubsubhubbub_publish.publish(self.hub + '/single', self.feed)
self.assertEquals(1, REQUESTS)
def testMultiple(self):
pubsubhubbub_publish.publish(self.hub + '/multiple',
self.feed, self.feed2, self.feed3)
def testList(self):
pubsubhubbub_publish.publish(self.hub + '/multiple',
[self.feed, self.feed2, self.feed3])
def testIterable(self):
pubsubhubbub_publish.publish(self.hub + '/multiple',
iter([self.feed, self.feed2, self.feed3]))
def testBatchSizeLimit(self):
old = pubsubhubbub_publish.URL_BATCH_SIZE
try:
pubsubhubbub_publish.URL_BATCH_SIZE = 2
pubsubhubbub_publish.publish(self.hub + '/batch',
[self.feed, self.feed2, self.feed3])
finally:
pubsubhubbub_publish.URL_BATCH_SIZE = old
self.assertEquals(2, REQUESTS)
def testBadHubHostname(self):
self.assertRaises(
pubsubhubbub_publish.PublishError,
pubsubhubbub_publish.publish,
'http://asdf.does.not.resolve', self.feed)
def testBadArgument(self):
self.assertRaises(
pubsubhubbub_publish.PublishError,
pubsubhubbub_publish.publish,
self.hub + '/fail', self.feed)
def testBadHubUrl(self):
self.assertRaises(
pubsubhubbub_publish.PublishError,
pubsubhubbub_publish.publish,
'not://a.url.is.this', self.feed)
def testNotFound(self):
self.assertRaises(
pubsubhubbub_publish.PublishError,
pubsubhubbub_publish.publish,
self.hub + '/unknown', self.feed)
if __name__ == '__main__':
unittest.main()
|
'''
https://en.wikipedia.org/wiki/Dutch_national_flag_problem
procedure three-way-partition(A : array of values, mid : value):
i ← 0
j ← 0
k ← size of A - 1
while j <= k:
if A[j] < mid:
swap A[i] and A[j]
i ← i + 1
j ← j + 1
else if A[j] > mid:
swap A[j] and A[k]
k ← k - 1
else:
j ← j + 1
'''
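# A minimal runnable sketch of the three-way partition described above
# (not from the original source; assumes a Python list of comparable values).
def three_way_partition(a, mid):
    """Partition `a` in place around `mid` (Dutch national flag problem)."""
    i, j, k = 0, 0, len(a) - 1
    while j <= k:
        if a[j] < mid:
            a[i], a[j] = a[j], a[i]
            i += 1
            j += 1
        elif a[j] > mid:
            a[j], a[k] = a[k], a[j]
            k -= 1
        else:
            j += 1
    return a
if __name__ == '__main__':
    # example: group values below, equal to and above the pivot 1
    print(three_way_partition([2, 0, 1, 2, 1, 0], 1))  # -> [0, 0, 1, 1, 2, 2]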
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import io
import zipfile
from attrdict import AttrDict
from pprint import pprint, pformat
from fnmatch import fnmatch
from authority.base import AuthorityBase
from utils.dictionary import merge
from app import app
from config import CFG
def not_200(call):
return call.recv.status != 200
class LetsEncryptAuthority(AuthorityBase):
def __init__(self, ar, cfg, verbosity):
super(LetsEncryptAuthority, self).__init__(ar, cfg, verbosity)
def display(self, bundle_name):
raise NotImplementedError
def renew_certificate(self, bundle_name):
raise NotImplementedError
def revoke_certificate(self, bundle_name):
raise NotImplementedError
|
import csv
import datetime
import os
from metricfarmer.exceptions import ExtensionException
def target_file_csv(metrics, **kwargs):
if 'path' not in kwargs:
raise ExtensionException('Path parameter must be specified for mf.file_csv')
path = kwargs['path']
override = kwargs.get('override', False)
delimiter = kwargs.get('delimiter', ',')
orig_data = {}
if os.path.exists(path) and not override:
with open(path, 'r') as csv_file:
reader = csv.DictReader(csv_file, delimiter=delimiter)
headers = reader.fieldnames
for row in reader:
orig_data[row['metric']] = row
else:
headers = ['metric']
timestamp = datetime.datetime.now().replace(microsecond=0).isoformat()
updated_data = orig_data
for name, metric in metrics.items():
if name in updated_data.keys():
updated_data[name][timestamp] = metric['result']
else:
updated_data[name] = {'metric': name, timestamp: metric['result']}
# Update headers with newest timestamp for current data
headers.append(timestamp)
with open(path, 'w') as csv_file:
writer = csv.DictWriter(csv_file, headers, delimiter=delimiter)
writer.writeheader()
writer.writerows(updated_data.values())
|
from django import forms
from django.forms import ModelForm
from .models import Task
class TaskCreateForm(forms.Form):
'''
user create his task by using this form
'''
name = forms.CharField(widget=forms.Textarea,
label='Task name', max_length=300)
done = forms.BooleanField(required=False, label='Completed')
class TaskUpdateForm(ModelForm):
'''
user edit his task by using this form
'''
class Meta:
model = Task
fields = ['name', 'done']
|
from .base import *
class CustomerGroups(ListableApiResource, CreateableApiResource, UpdateableApiResource, DeleteableApiResource):
resource_name = 'customer_groups'
|
from datetime import datetime
from decimal import Decimal
from uuid import uuid1
HOME_TEAM_NAME = "Drip Bayless"
def fantasy_comparison_response_transformer(comparison):
home_team_stats = comparison[HOME_TEAM_NAME]
away_team_name = list(comparison.keys())[-1]
away_team_stats = comparison[away_team_name]
return {
"id": str(uuid1()),
"inserted_at": str(datetime.utcnow()),
"home_team_name": HOME_TEAM_NAME,
"home_team_stats": {k: Decimal(str(v)) for k, v in home_team_stats.items()},
"away_team_name": away_team_name,
"away_team_stats": {k: Decimal(str(v)) for k, v in away_team_stats.items()},
}
|
""" Copyright start
Copyright (C) 2008 - 2021 Fortinet Inc.
All rights reserved.
FORTINET CONFIDENTIAL & FORTINET PROPRIETARY SOURCE CODE
Copyright end """
import time, os
import base64
from base64 import b64encode
from integrations.crudhub import make_request, make_file_upload_request
from connectors.cyops_utilities.builtins import download_file_from_cyops
from connectors.core.connector import get_logger, ConnectorError
from .utils import QUERY_SCHEMA, FortiSandbox
from django.conf import settings
logger = get_logger('fortisandbox')
def get_epoch(_date):
try:
pattern = '%Y-%m-%dT%H:%M:%S.%fZ'
return int(time.mktime(time.strptime(_date, pattern)))
except Exception as Err:
logger.exception('get_epoch: Exception occurred [{0}]'.format(str(Err)))
raise ConnectorError('get_epoch: Exception occurred [{0}]'.format(str(Err)))
def handle_params(params):
try:
if params.get('input_type') == 'Attachment ID':
iri = params.get('attachment_iri')
if not iri.startswith('/api/3/attachments/'):
iri = '/api/3/attachments/{0}'.format(iri)
elif params.get('input_type') == 'Indicator IRI':
iri = params.get('indicator_iri')
if not iri.startswith('/api/3/indicators/'):
iri = '/api/3/indicators/{0}'.format(iri)
response = make_request(iri, 'GET')
return response['file']['@id'], response['file']['filename']
except Exception as err:
logger.exception('handle_params(): Exception occurred {0}'.format(err))
raise ConnectorError('Invalid attachment/indicator iri {0}'.format(iri))
def create_cyops_attachment(content, attachment_name, file_name):
try:
file_resp = make_file_upload_request(file_name, content, 'application/octet-stream')
description = 'FortiSandbox: {0}'.format(file_name)
payload = {'name': attachment_name, 'file': file_resp['@id'], 'description': description}
return make_request('/api/3/attachments', 'POST', payload)
except Exception as e:
logger.exception(str(e))
raise ConnectorError(e)
def _check_health(config):
try:
response = get_system_status(config, {})
if not response['result']['status']['message'] == 'OK':
raise ConnectorError(response['result']['status']['message'])
except Exception as err:
logger.exception(str(err))
raise ConnectorError('{0}'.format(err))
def submit_file(config, params):
forti = FortiSandbox(config)
try:
file_iri, filename = handle_params(params)
dw_file_md = download_file_from_cyops(file_iri)
tmp_file_path = dw_file_md.get('cyops_file_path')
file_name = dw_file_md.get('filename')
if len(file_name) == 0 and len(tmp_file_path) > 0:
new_name = tmp_file_path.split('/')
if len(new_name) == 3:
file_name = new_name[2]
else:
file_name = tmp_file_path
file_path = os.path.join(settings.TMP_FILE_ROOT, tmp_file_path)
with open(file_path, 'rb') as attachment:
file_data = attachment.read()
test_input = QUERY_SCHEMA.get('file_upload')
test_input = forti._load_file_for_upload(file_data, test_input, filename)
test_input['params'][0]['overwrite_vm_list'] = params['overwrite_vm_list']
test_input['session'] = forti.session_id
response = forti._handle_post(test_input)
return response
except Exception as e:
logger.exception(str(e))
raise ConnectorError(e)
finally:
forti.logout()
def submit_urlfile(config, params):
forti = FortiSandbox(config)
try:
urls = params['url']
if isinstance(urls, str):
urls = urls.split(',')
urls_value = '\n'.join(urls).replace(' ', '')
test_input = QUERY_SCHEMA.get('file_upload_url')
test_input = forti._load_file_for_upload(urls_value, test_input, 'auto_submitted_urls')
test_input['params'][0]['overwrite_vm_list'] = params['overwrite_vm_list']
test_input['params'][0]['timeout'] = '60' if params['timeout'] < 0 else str(params['timeout'])
test_input['params'][0]['depth'] = '1' if params['depth'] else '0'
test_input['session'] = forti.session_id
response = forti._handle_post(test_input)
return response
except Exception as e:
logger.exception(str(e))
raise ConnectorError(e)
finally:
forti.logout()
def get_system_status(config, params):
forti = FortiSandbox(config)
try:
test_input = QUERY_SCHEMA.get('get_status')
test_input['session'] = forti.session_id
response = forti._handle_post(test_input)
return response
except Exception as e:
logger.exception(str(e))
raise ConnectorError(e)
finally:
forti.logout()
def get_scan_stats(config, params):
forti = FortiSandbox(config)
try:
test_input = QUERY_SCHEMA.get('get_scan_stats')
test_input['session'] = forti.session_id
response = forti._handle_post(test_input)
return response
except Exception as e:
logger.exception(str(e))
raise ConnectorError(e)
finally:
forti.logout()
def get_submission_job_list(config, params):
forti = FortiSandbox(config)
try:
test_input = QUERY_SCHEMA.get('get-jobs-of-submission')
test_input['params'][0]['sid'] = str(params['sid'])
test_input['session'] = forti.session_id
response = forti._handle_post(test_input)
return response
except Exception as e:
logger.exception(str(e))
raise ConnectorError(e)
finally:
forti.logout()
def get_scan_result_job(config, params):
forti = FortiSandbox(config)
try:
test_input = QUERY_SCHEMA.get('get_job_verdict')
test_input['params'][0]['jid'] = str(params['jid'])
test_input['session'] = forti.session_id
response = forti._handle_post(test_input)
return response
except Exception as e:
logger.exception(str(e))
raise ConnectorError(e)
finally:
forti.logout()
def get_file_rating(config, params):
forti = FortiSandbox(config)
try:
test_input = QUERY_SCHEMA.get('get_file_rating')
test_input['params'][0]['ctype'] = params['hash_type'].lower()
test_input['params'][0]['checksum'] = params['file_hash']
test_input['session'] = forti.session_id
response = forti._handle_post(test_input)
return response
except Exception as e:
logger.exception(str(e))
raise ConnectorError(e)
finally:
forti.logout()
def get_url_rating(config, params):
forti = FortiSandbox(config)
try:
test_input = QUERY_SCHEMA.get('get_url_rating')
test_input['params'][0]['address'] = params['url'] if isinstance(params['url'], list) else [params['url']]
test_input['session'] = forti.session_id
response = forti._handle_post(test_input)
return response
except Exception as e:
logger.exception(str(e))
raise ConnectorError(e)
finally:
forti.logout()
def get_job_behaviour(config, params):
forti = FortiSandbox(config)
try:
test_input = QUERY_SCHEMA.get('get-job-behavior')
test_input['params'][0]['ctype'] = params['hash_type'].lower()
test_input['params'][0]['checksum'] = params['file_hash']
test_input['session'] = forti.session_id
response = forti._handle_post(test_input)
return response
except Exception as e:
logger.exception(str(e))
raise ConnectorError(e)
finally:
forti.logout()
def cancel_submission(config, params):
forti = FortiSandbox(config)
try:
test_input = QUERY_SCHEMA.get('cancel-submission')
test_input['params'][0]['sid'] = str(params['sid'])
test_input['params'][0]['reason'] = params['reason']
test_input['session'] = forti.session_id
response = forti._handle_post(test_input)
return response
except Exception as e:
logger.exception(str(e))
raise ConnectorError(e)
finally:
forti.logout()
def handle_white_black_list(config, params):
forti = FortiSandbox(config)
try:
indicator_value = params.get('indicator_value', '')
indicator_type = params['indicator_type'].lower()
if indicator_type == 'url regex':
indicator_type = 'url_regex'
if not indicator_value:
indicator_value = ['test']
indicator_value = indicator_value if isinstance(indicator_value, list) else [indicator_value]
indicator_value = '\n'.join(indicator_value)
test_input = QUERY_SCHEMA.get('white-black-list')
test_input['params'][0]['list_type'] = params['list_type'].lower()
test_input['params'][0]['checksum_type'] = indicator_type
test_input['params'][0]['action'] = params['action'].lower()
test_input['params'][0]['upload_file'] = b64encode(indicator_value.encode()).decode()
test_input['session'] = forti.session_id
response = forti._handle_post(test_input)
if params['action'].lower() == 'download':
if not response['result']['status']['message'] == 'OK':
return response
download_file = response['result']['data']['download_file']
if download_file:
filename = '{0}_{1}.txt'.format(params['list_type'].lower(), params['indicator_type'].lower())
attachment_name = 'FortiSandbox: Download {0} {1}'.format(params['list_type'], params['indicator_type'])
return create_cyops_attachment(base64.b64decode(download_file.encode('utf-8')), attachment_name,
filename)
return response
except Exception as e:
logger.exception(str(e))
raise ConnectorError(e)
finally:
forti.logout()
def mark_sample_fp_fn(config, params):
forti = FortiSandbox(config)
try:
test_input = QUERY_SCHEMA.get('mark-sample-fp-fn')
test_input['params'][0]['jid'] = str(params['jid'])
test_input['params'][0]['comments'] = params['comments']
test_input['params'][0]['cloud_submit'] = 0
test_input['session'] = forti.session_id
response = forti._handle_post(test_input)
return response
except Exception as e:
logger.exception(str(e))
raise ConnectorError(e)
finally:
forti.logout()
def get_avrescan(config, params):
forti = FortiSandbox(config)
try:
test_input = QUERY_SCHEMA.get('get-avrescan')
test_input['params'][0]['stime'] = get_epoch(params['stime'])
test_input['params'][0]['etime'] = get_epoch(params['etime'])
test_input['params'][0]['need_av_ver'] = 1 if params['need_av_ver'] else 0
test_input['session'] = forti.session_id
response = forti._handle_post(test_input)
return response
except Exception as e:
logger.exception(str(e))
raise ConnectorError(e)
finally:
forti.logout()
def get_file_verdict(config, params):
forti = FortiSandbox(config)
try:
test_input = QUERY_SCHEMA.get('get-multiple-file-verdict')
test_input['params'][0]['ctype'] = params['hash_type'].lower()
test_input['params'][0]['checksum'] = params['file_hash'] if (isinstance(params['file_hash'], list)) else [
params['file_hash']]
test_input['session'] = forti.session_id
response = forti._handle_post(test_input)
return response
except Exception as e:
logger.exception(str(e))
raise ConnectorError(e)
finally:
forti.logout()
def get_installed_vm(config, params):
forti = FortiSandbox(config)
try:
test_input = QUERY_SCHEMA.get('get-all-installed-vm')
test_input['session'] = forti.session_id
response = forti._handle_post(test_input)
return response
except Exception as e:
logger.exception(str(e))
raise ConnectorError(e)
finally:
forti.logout()
def get_pdf_report(config, params):
forti = FortiSandbox(config)
try:
test_input = QUERY_SCHEMA.get('get-pdf-report')
test_input['params'][0]['qtype'] = params['qtype']
test_input['params'][0]['qval'] = params['qval']
test_input['session'] = forti.session_id
response = forti._handle_post(test_input)
if not response['result']['status']['message'] == 'OK':
return response
report_details = response['result']['data']
report_name, report_data = report_details.get('report_name'), report_details.get('report')
attachment_name = 'FortiSandbox: Report'
return create_cyops_attachment(base64.b64decode(report_data.encode('utf-8')), attachment_name, report_name)
except Exception as e:
logger.exception(str(e))
raise ConnectorError(e)
finally:
forti.logout()
def download_hashes_url_from_mwpkg(config, params):
forti = FortiSandbox(config)
type_map = {'SHA256': 0, 'SHA1': 1, 'MD5': 2, 'URL': 3}
test_input = QUERY_SCHEMA.get('download-malpkg')
lazy = 1 if params['lazy'] else 0
if lazy == 0:
major, minor = params['major'], params['minor']
test_input['params'][0]['major'] = major
test_input['params'][0]['minor'] = minor
try:
test_input['params'][0]['lazy'] = lazy
test_input['params'][0]['type'] = type_map[params['type']]
test_input['session'] = forti.session_id
response = forti._handle_post(test_input)
if response['result']['status']['message'] == 'OK':
file = response['result']['data']['download_file']
response['result']['data']['download_file'] = base64.b64decode(file.encode('utf-8')).decode()[:-1]
return response
except Exception as e:
logger.exception(str(e))
raise ConnectorError(e)
finally:
forti.logout()
operations = {
'submit_file': submit_file,
'submit_urlfile': submit_urlfile,
'get_system_status': get_system_status,
'get_scan_stats': get_scan_stats,
'get_submission_job_list': get_submission_job_list,
'get_scan_result_job': get_scan_result_job,
'get_file_rating': get_file_rating,
'get_url_rating': get_url_rating,
'get_file_verdict': get_file_verdict,
'get_job_behaviour': get_job_behaviour,
'cancel_submission': cancel_submission,
'handle_white_black_list': handle_white_black_list,
'mark_sample_fp_fn': mark_sample_fp_fn,
'get_avrescan': get_avrescan,
'get_installed_vm': get_installed_vm,
'get_pdf_report': get_pdf_report,
'download_hashes_url_from_mwpkg': download_hashes_url_from_mwpkg
}
|
# Check whether matplotlib imports cleanly
import matplotlib
|
import logging
from okdata.sdk import SDK
log = logging.getLogger()
class Status(SDK):
def __init__(self, config=None, auth=None, env=None):
self.__name__ = "status"
super().__init__(config, auth, env)
def get_status(self, uuid, retries=0):
url = self.config.get("statusApiUrl")
log.info(f"Retrieving status for UUID={uuid} from: {url}")
response = self.get(f"{url}/{uuid}", retries=retries)
response.raise_for_status()
return response.json()
def update_status(self, trace_id, data, retries=0):
url = self.config.get("statusApiUrl")
response = self.post(f"{url}/{trace_id}", data, retries=retries)
response.raise_for_status()
return response.json()
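# A minimal usage sketch (the UUID and payload below are hypothetical; it assumes
# a configured environment where "statusApiUrl" and SDK credentials are available):
if __name__ == "__main__":
    status = Status(env="dev")
    current = status.get_status("123e4567-e89b-12d3-a456-426614174000")
    status.update_status("123e4567-e89b-12d3-a456-426614174000", {"status": "FINISHED"})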
|
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
setup(
name='ensure',
version='1.0.2',
url='https://github.com/kislyuk/ensure',
license='Apache Software License',
author='Andrey Kislyuk',
author_email='kislyuk@gmail.com',
description='Literate BDD assertions in Python with no magic',
long_description=open('README.rst').read(),
python_requires='>=3.5',
install_requires=['six >= 1.11.0'],
extras_require={
'test': ['coverage', 'flake8']
},
packages=find_packages(exclude=['test']),
include_package_data=True,
platforms=['MacOS X', 'Posix'],
test_suite='test',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Development Status :: 5 - Production/Stable',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-15 18:57
from __future__ import unicode_literals
import django.contrib.gis.db.models.fields
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('lookup_tables', '0006_auto_20170215_0857'),
('downed_wildlife_monitoring', '0018_auto_20170214_1241'),
]
operations = [
migrations.CreateModel(
name='SEEFReporting',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('trial_date', models.DateField(default=django.utils.timezone.now)),
('loc', django.contrib.gis.db.models.fields.PointField(blank=True, null=True, srid=4326)),
('notes', models.TextField(blank=True, null=True)),
('species', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='lookup_tables.SpeciesDef')),
('turbine', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='lookup_tables.Infrastructure')),
],
),
migrations.AddField(
model_name='seefmaster',
name='notes',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='weop',
name='notes',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='caremonitoring',
name='monitor_date',
field=models.DateField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='caremonitoring',
name='notes',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='caresetup',
name='notes',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='downedwildlifemonitoring',
name='notes',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='downedwildlifemonitoring',
name='photo_as_found',
field=models.ImageField(blank=True, null=True, upload_to=b''),
),
migrations.AlterField(
model_name='downedwildlifemonitoring',
name='photo_as_found2',
field=models.ImageField(blank=True, null=True, upload_to=b''),
),
migrations.AlterField(
model_name='downedwildlifemonitoring',
name='photo_injury',
field=models.ImageField(blank=True, null=True, upload_to=b''),
),
migrations.AlterField(
model_name='downedwildlifemonitoring',
name='photo_other',
field=models.ImageField(blank=True, null=True, upload_to=b''),
),
migrations.AlterField(
model_name='downedwildlifemonitoring',
name='photo_structure',
field=models.ImageField(blank=True, null=True, upload_to=b''),
),
migrations.AlterField(
model_name='downedwildlifemonitoring',
name='time_collected',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AlterField(
model_name='downedwildlifemonitoring',
name='time_reported',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AlterField(
model_name='downedwildlifemonitoring',
name='time_responded',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AlterField(
model_name='kwpiisearching',
name='monitor_date',
field=models.DateField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='kwpiisearching',
name='notes',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='kwpisearching',
name='monitor_date',
field=models.DateField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='kwpisearching',
name='notes',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='nenesurvey',
name='notes',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='wacardswap',
name='notes',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='wacardswap',
name='swap_date',
field=models.DateField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='weop',
name='obs_date',
field=models.DateField(default=django.utils.timezone.now),
),
]
|
import cv2
import os
os.chdir(r'C:\Python27\Lib\site-packages\pytesser')
from PIL import Image
from pytesseract import *
import re
import time
from Spellcheck import correction,is_english_word
image = cv2.imread("C:/Users/Muhammad/Downloads/Design/1 Vocable/Images/OCR2.jpg")
gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY) # grayscale
_,thresh = cv2.threshold(gray,120,255,cv2.THRESH_BINARY_INV) # threshold
kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(4,4))
dilated = cv2.dilate(thresh,kernel,iterations = 12) # dilate
_, contours, hierarchy = cv2.findContours(dilated,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE) # get contours
arr = []
# for each contour found, draw a rectangle around it on original image
for contour in contours:
# get rectangle bounding contour
[x,y,w,h] = cv2.boundingRect(contour)
# # discard areas that are too large
# if h>400 and w>400:
# continue
# discard areas that are too small
if h<20 or w<20:
continue
roi = gray[y:y + h, x:x + w]
arr.append(roi)
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 0), 2)
cv2.imshow("contoured.jpg", image)
cv2.waitKey(0)
tx = ""
arr = arr[::-1]
t0 = time.time()
for im in arr:
thresh, im = cv2.threshold(im, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
r = 1000.0 / im.shape[1]
dim = (1000, int(im.shape[0] * r))
# perform the actual resizing of the image and show it
im = cv2.resize(im, dim, interpolation=cv2.INTER_AREA )
cv2.imshow("pic.jpg", im)
cv2.waitKey(0)
im = Image.fromarray(im)
txt = image_to_string(im)
    words = re.split(r"[^0-9a-zA-Z]+", txt)
for w in words:
if is_english_word(w):
tx += w + " "
else:
tx += correction(w) + " "
print(tx)
t1 = time.time()
print(t1 - t0)
|
''' Application configurables
+ user_info
+ commands
'''
#%% User info
''' Define user information in 'account_info'.
    When the application needs user-specific information,
    it will refer to this object to get login credentials
    for the email server and Robinhood.
    To get the value for 'phone_address', send a text message
    to your email and see where it was sent from. This
    application will only accept instructions from the
    address defined in 'phone_address'.
'''
user_info = {
'phone_address':'', # your phone address -- bot will only read commands from this number
'email':{
'server':'', # 'gmail.com', 'outlook.com'
'username':'', # email address of bot
'password':''
},
'robinhood':{
'username':'',
'password':''
},
}
#%% User defined commands
''' Define command words to trigger custom instructions.
The application will read the emails sent from the user
    and search for the defined command words. If a command word
    is detected, the application will run the appropriate process
    (a minimal matching sketch follows the dict below).
'''
commands = {
# Get current portfolio holdings
'current_holdings':[
'CURRENT',
'CURRENT HOLDINGS',
],
# Cancel all outstanding orders
'cancel_orders':[
'CANCEL',
'CANCEL ALL',
],
# Limit orders
'limit_order':[
'LIMIT BUY',
'LIMIT SELL',
],
# Open orders
'open_orders':[
'O',
'OPEN',
'OPEN ORDERS',
],
# Instruments -- helps route msg to appropriate function
'instruments':{
'equities':[
'E',
'EQUITY',
'S',
'STOCK'
],
'options':[
'O',
'OPTION',
'OPTIONS'
],
'crypto':[
'C',
'CRYPTO'
]
},
}
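#%% Example: routing a received message to a command key
''' A minimal matching sketch, independent of the email/Robinhood plumbing.
    It only illustrates how a message body could be mapped onto the command
    groups defined above; the helper name and sample messages are illustrative,
    not part of the bot itself.
'''
def match_command(message, commands=commands):
    text = message.strip().upper()
    for name, words in commands.items():
        if name == 'instruments':
            continue  # instrument keywords are handled separately
        if any(text.startswith(word) for word in words):
            return name
    return None

# match_command('CANCEL ALL')      -> 'cancel_orders'
# match_command('LIMIT BUY AAPL')  -> 'limit_order'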
|
import dlib
from os import path
from dataflow import ports
import numpy as np
class FaceDetector:
def __init__(self, threshold=0.05):
self.detector = dlib.get_frontal_face_detector()
self.threshold = threshold
self.sink_image = ports.StateSink()
def out_bounds(self):
img = self.sink_image.get()
if img is None:
return
dets, _, _ = self.detector.run(img, 0, self.threshold)
if not dets:
return None
return dets[0]
def _dlib_track_to_array(prediction):
np_pts = np.zeros([68, 2], dtype=np.float32)
for i, p in enumerate(prediction.parts()):
np_pts[i, :] = (p.x, p.y)
return np_pts
class LandmarkDetector:
def __init__(self):
model_dir = path.join(path.dirname(path.abspath(__file__)), 'models')
self.predictor = dlib.shape_predictor(
path.join(model_dir, 'shape_predictor_68_face_landmarks.dat'))
self.sink_image = ports.StateSink()
self.sink_bounds = ports.StateSink()
def __call__(self):
return self.detect_landmarks()
def detect_landmarks(self):
img = self.sink_image.get()
bounds = self.sink_bounds.get()
if img is None or bounds is None:
return None
rect = dlib.rectangle(
int(round(bounds.left())),
int(round(bounds.top())),
int(round(bounds.right())),
int(round(bounds.bottom())))
if min(rect.top(), rect.left()) < 0 or rect.bottom() > img.shape[0] or rect.right() > img.shape[1]:
return None
return _dlib_track_to_array(prediction=self.predictor(img, rect))
def outline(prediction):
return prediction[0:17, :]
def brow_l(prediction):
return prediction[17:22, :]
def brow_r(prediction):
return prediction[22:27, :]
def nose_above(prediction):
return prediction[27:31, :]
def nose_below(prediction):
return prediction[31:36, :]
def eye_l(prediction):
return prediction[36:42, :]
def eye_r(prediction):
return prediction[42:48, :]
def mouth_out(prediction):
return prediction[48:60, :]
def mouth_in(prediction):
return prediction[60:68, :]
def get_all(prediction):
return prediction[0:68, :]
|
"""Squirtle mini-library for SVG rendering in Pyglet.
Example usage:
import squirtle
my_svg = squirtle.SVG('filename.svg')
my_svg.draw(100, 200, angle=15)
"""
from pyglet import gl
try:
import xml.etree.ElementTree
from xml.etree.cElementTree import parse
except ImportError:
import elementtree.ElementTree
from elementtree.ElementTree import parse
import math
from ctypes import CFUNCTYPE, POINTER, cast, c_char_p
import re
import sys
import string
from squirtle.gradient import GradientContainer, LinearGradient, RadialGradient
from squirtle.matrix import Matrix, as_c_matrix
from squirtle.parse import parse_color, parse_list, parse_style
BEZIER_POINTS = 20
CIRCLE_POINTS = 24
TOLERANCE = 0.001
xmlns = 'http://www.w3.org/2000/svg'
print(cast(gl.glGetString(gl.GL_SHADING_LANGUAGE_VERSION), c_char_p).value)
tess = gl.gluNewTess()
gl.gluTessNormal(tess, 0, 0, 1)
gl.gluTessProperty(tess, gl.GLU_TESS_WINDING_RULE, gl.GLU_TESS_WINDING_NONZERO)
if sys.platform == 'win32':
from ctypes import WINFUNCTYPE
c_functype = WINFUNCTYPE
else:
c_functype = CFUNCTYPE
callback_types = {gl.GLU_TESS_VERTEX: c_functype(None, POINTER(gl.GLvoid)),
gl.GLU_TESS_BEGIN: c_functype(None, gl.GLenum),
gl.GLU_TESS_END: c_functype(None),
gl.GLU_TESS_ERROR: c_functype(None, gl.GLenum),
gl.GLU_TESS_COMBINE: c_functype(None, POINTER(gl.GLdouble), POINTER(POINTER(gl.GLvoid)), POINTER(gl.GLfloat), POINTER(POINTER(gl.GLvoid)))}
def set_tess_callback(which):
def set_call(func):
cb = callback_types[which](func)
gl.gluTessCallback(tess, which, cast(cb, CFUNCTYPE(None)))
return cb
return set_call
def setup_gl():
"""Set various pieces of OpenGL state for better rendering of SVG."""
gl.glEnable(gl.GL_LINE_SMOOTH)
gl.glEnable(gl.GL_BLEND)
gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
class SvgPath(object):
def __init__(self, path, stroke, polygon, fill, transform, path_id, title, desc):
self.path = list(path) if path else []
self.stroke = stroke
self.polygon = polygon
self.fill = fill
self.transform = Matrix(transform.values)
self.id = path_id
self.title = title
self.description = desc
print(f"PATH: {self.path}")
def __repr__(self):
return "<SvgPath id=%s title='%s' description='%s' transform=%s>" % (
self.id, self.title, self.description, self.transform
)
class TriangulationError(Exception):
"""Exception raised when triangulation of a filled area fails. For internal use only."""
pass
class SVG(object):
"""Opaque SVG image object.
Users should instantiate this object once for each SVG file they wish to
render.
"""
_disp_list_cache = {}
def __init__(self, filename, anchor_x=0, anchor_y=0, bezier_points=BEZIER_POINTS, circle_points=CIRCLE_POINTS, invert_y=False):
"""Creates an SVG object from a .svg or .svgz file.
`filename`: str
The name of the file to be loaded.
`anchor_x`: float
The horizontal anchor position for scaling and rotations. Defaults to 0. The symbolic
values 'left', 'center' and 'right' are also accepted.
`anchor_y`: float
The vertical anchor position for scaling and rotations. Defaults to 0. The symbolic
values 'bottom', 'center' and 'top' are also accepted.
`bezier_points`: int
            The number of line segments into which to subdivide Bezier splines. Defaults to 20.
`circle_points`: int
The number of line segments into which to subdivide circular and elliptic arcs.
            Defaults to 24.
"""
self.path_lookup = {}
self.paths = []
self.invert_y = invert_y
self.filename = filename
self.bezier_points = bezier_points
self.circle_points = circle_points
self.bezier_coefficients = []
self.gradients = GradientContainer()
self.generate_disp_list()
self.anchor_x = anchor_x
self.anchor_y = anchor_y
def _set_anchor_x(self, anchor_x):
self._anchor_x = anchor_x
if self._anchor_x == 'left':
self._a_x = 0
elif self._anchor_x == 'center':
self._a_x = self.width * .5
elif self._anchor_x == 'right':
self._a_x = self.width
else:
self._a_x = self._anchor_x
def _get_anchor_x(self):
return self._anchor_x
anchor_x = property(_get_anchor_x, _set_anchor_x)
def _set_anchor_y(self, anchor_y):
self._anchor_y = anchor_y
if self._anchor_y == 'bottom':
self._a_y = 0
elif self._anchor_y == 'center':
self._a_y = self.height * .5
elif self._anchor_y == 'top':
self._a_y = self.height
else:
            self._a_y = self._anchor_y
def _get_anchor_y(self):
return self._anchor_y
anchor_y = property(_get_anchor_y, _set_anchor_y)
def generate_disp_list(self):
if (self.filename, self.bezier_points) in self._disp_list_cache:
self.disp_list, self.width, self.height = self._disp_list_cache[self.filename, self.bezier_points]
else:
if open(self.filename, 'rb').read(3) == b'\x1f\x8b\x08': # gzip magic numbers
import gzip
f = gzip.open(self.filename, 'rb')
else:
f = open(self.filename, 'rb')
self.tree = parse(f)
self.parse_doc()
self.disp_list = gl.glGenLists(1)
gl.glNewList(self.disp_list, gl.GL_COMPILE)
self.render_slowly()
gl.glEndList()
self._disp_list_cache[self.filename, self.bezier_points] = (self.disp_list, self.width, self.height)
def draw(self, x, y, z=0, angle=0, scale=1):
"""Draws the SVG to screen.
:Parameters
`x` : float
The x-coordinate at which to draw.
`y` : float
The y-coordinate at which to draw.
`z` : float
The z-coordinate at which to draw. Defaults to 0. Note that z-ordering may not
give expected results when transparency is used.
`angle` : float
The angle by which the image should be rotated (in degrees). Defaults to 0.
`scale` : float
The amount by which the image should be scaled, either as a float, or a tuple
of two floats (xscale, yscale).
"""
gl.glPushMatrix()
gl.glTranslatef(x, y, z)
if angle:
gl.glRotatef(angle, 0, 0, 1)
if scale != 1:
try:
gl.glScalef(scale[0], scale[1], 1)
except TypeError:
gl.glScalef(scale, scale, 1)
if self._a_x or self._a_y:
gl.glTranslatef(-self._a_x, -self._a_y, 0)
gl.glCallList(self.disp_list)
gl.glPopMatrix()
def render_slowly(self):
self.n_tris = 0
self.n_lines = 0
for svgpath in self.paths:
path = svgpath.path
stroke = svgpath.stroke
tris = svgpath.polygon
fill = svgpath.fill
transform = svgpath.transform
if tris:
                self.n_tris += len(tris) // 3
g = None
if isinstance(fill, str):
g = self.gradients[fill]
fills = [g.interp(x) for x in tris]
else:
fills = [fill for x in tris]
gl.glPushMatrix()
gl.glMultMatrixf(as_c_matrix(transform.to_mat4()))
if g: g.apply_shader(transform)
gl.glBegin(gl.GL_TRIANGLES)
for vtx, clr in zip(tris, fills):
# vtx = transform(vtx)
if not g:
gl.glColor4ub(*clr)
else:
gl.glColor4f(1, 1, 1, 1)
gl.glVertex3f(vtx[0], vtx[1], 0)
gl.glEnd()
gl.glPopMatrix()
if g: g.unapply_shader()
if path:
for loop in path:
self.n_lines += len(loop) - 1
loop_plus = []
for i in range(len(loop) - 1):
loop_plus += [loop[i], loop[i + 1]]
if isinstance(stroke, str):
g = self.gradients[stroke]
strokes = [g.interp(x) for x in loop_plus]
else:
strokes = [stroke for x in loop_plus]
gl.glPushMatrix()
gl.glMultMatrixf(as_c_matrix(transform.to_mat4()))
gl.glBegin(gl.GL_LINES)
for vtx, clr in zip(loop_plus, strokes):
# vtx = transform(vtx)
gl.glColor4ub(*clr)
gl.glVertex3f(vtx[0], vtx[1], 0)
gl.glEnd()
gl.glPopMatrix()
def parse_float(self, txt):
if txt.endswith('px'):
return float(txt[:-2])
else:
return float(txt)
def parse_doc(self):
self.paths = []
self.width = self.parse_float(self.tree._root.get("width", '0'))
self.height = self.parse_float(self.tree._root.get("height", '0'))
if self.height:
if self.invert_y:
self.transform = Matrix([1, 0, 0, -1, 0, self.height])
else:
self.transform = Matrix([1, 0, 0, 1, 0, 0])
else:
x, y, w, h = (self.parse_float(x) for x in parse_list(self.tree._root.get("viewBox")))
if self.invert_y:
self.transform = Matrix([1, 0, 0, -1, 0, 0])
else:
self.transform = Matrix([1, 0, 0, 1, 0, 0])
self.height = h
self.width = w
self.opacity = 1.0
        for e in list(self.tree._root):
try:
self.parse_element(e)
except Exception as ex:
print(f'Exception while parsing element: {e}')
raise
def parse_element(self, e):
default = object()
self.fill = parse_color(e.get('fill'), default)
self.stroke = parse_color(e.get('stroke'), default)
oldopacity = self.opacity
self.opacity *= float(e.get('opacity', 1))
fill_opacity = float(e.get('fill-opacity', 1))
stroke_opacity = float(e.get('stroke-opacity', 1))
self.path_id = e.get('id', '')
self.path_title = e.findtext('{%s}title' % (xmlns,))
self.path_description = e.findtext('{%s}desc' % (xmlns,))
oldtransform = self.transform
self.transform = self.transform * Matrix(e.get('transform'))
style = e.get('style')
if style:
sdict = parse_style(style)
if 'fill' in sdict:
self.fill = parse_color(sdict['fill'])
if 'fill-opacity' in sdict:
fill_opacity *= float(sdict['fill-opacity'])
if 'stroke' in sdict:
self.stroke = parse_color(sdict['stroke'])
if 'stroke-opacity' in sdict:
stroke_opacity *= float(sdict['stroke-opacity'])
if self.fill == default:
self.fill = [0, 0, 0, 255]
if self.stroke == default:
self.stroke = [0, 0, 0, 0]
if isinstance(self.stroke, list):
self.stroke[3] = int(self.opacity * stroke_opacity * self.stroke[3])
if isinstance(self.fill, list):
self.fill[3] = int(self.opacity * fill_opacity * self.fill[3])
if isinstance(self.stroke, list) and self.stroke[3] == 0: self.stroke = self.fill # Stroked edges antialias better
if e.tag.endswith('path'):
pathdata = e.get('d', '')
pathdata = re.findall("([A-Za-z]|-?[0-9]+\.?[0-9]*(?:e-?[0-9]*)?)", pathdata)
def pnext():
return (float(pathdata.pop(0)), float(pathdata.pop(0)))
self.new_path()
opcode = ''
while pathdata:
prev_opcode = opcode
if pathdata[0] in string.ascii_letters:
opcode = pathdata.pop(0)
else:
opcode = prev_opcode
if opcode == 'M':
self.set_position(*pnext())
elif opcode == 'm':
mx, my = pnext()
self.set_position(self.x + mx, self.y + my)
elif opcode == 'C':
self.curve_to(*(pnext() + pnext() + pnext()))
elif opcode == 'c':
mx = self.x
my = self.y
x1, y1 = pnext()
x2, y2 = pnext()
x, y = pnext()
self.curve_to(mx + x1, my + y1, mx + x2, my + y2, mx + x, my + y)
elif opcode == 'S':
self.curve_to(2 * self.x - self.last_cx, 2 * self.y - self.last_cy, *(pnext() + pnext()))
elif opcode == 's':
mx = self.x
my = self.y
x1, y1 = 2 * self.x - self.last_cx, 2 * self.y - self.last_cy
x2, y2 = pnext()
x, y = pnext()
self.curve_to(x1, y1, mx + x2, my + y2, mx + x, my + y)
elif opcode == 'A':
rx, ry = pnext()
phi = float(pathdata.pop(0))
large_arc = int(pathdata.pop(0))
sweep = int(pathdata.pop(0))
x, y = pnext()
self.arc_to(rx, ry, phi, large_arc, sweep, x, y)
elif opcode in 'zZ':
self.close_path()
elif opcode == 'L':
self.line_to(*pnext())
elif opcode == 'l':
x, y = pnext()
self.line_to(self.x + x, self.y + y)
elif opcode == 'H':
x = float(pathdata.pop(0))
self.line_to(x, self.y)
elif opcode == 'h':
x = float(pathdata.pop(0))
self.line_to(self.x + x, self.y)
elif opcode == 'V':
y = float(pathdata.pop(0))
self.line_to(self.x, y)
elif opcode == 'v':
y = float(pathdata.pop(0))
self.line_to(self.x, self.y + y)
else:
self.warn("Unrecognised opcode: " + opcode)
self.end_path()
elif e.tag.endswith('rect'):
x = float(e.get('x'))
y = float(e.get('y'))
h = float(e.get('height'))
w = float(e.get('width'))
rx = e.get('rx', None)
ry = e.get('ry', None)
if rx is None and ry is None:
# Default rectangle
self.new_path()
self.set_position(x, y)
self.line_to(x + w, y)
self.line_to(x + w, y + h)
self.line_to(x, y + h)
self.line_to(x, y)
self.end_path()
else:
# Rounded corners
# - if one or the other of rx or ry is specified, use that value for both
if rx is None and ry is not None:
rx = ry
if ry is None and rx is not None:
ry = rx
rx = float(rx)
ry = float(ry)
rx = min(rx, w/2)
ry = min(ry, h/2)
self.new_path()
self.set_position(x, y + ry)
self.line_to(x, y + h - ry)
self.arc_to(rx, ry, 0, 0, 0, x + rx, y + h)
self.line_to(x + w - rx, y + h)
self.arc_to(rx, ry, 0, 0, 0, x + w, y + h - ry)
self.line_to(x + w, y + ry)
self.arc_to(rx, ry, 0, 0, 0, x + w - rx, y)
self.line_to(x + rx, y)
self.arc_to(rx, ry, 0, 0, 0, x, y + ry)
self.end_path()
elif e.tag.endswith('polyline') or e.tag.endswith('polygon'):
pathdata = e.get('points')
pathdata = re.findall("(-?[0-9]+\.?[0-9]*(?:e-?[0-9]*)?)", pathdata)
def pnext():
return (float(pathdata.pop(0)), float(pathdata.pop(0)))
self.new_path()
while pathdata:
self.line_to(*pnext())
if e.tag.endswith('polygon'):
self.close_path()
self.end_path()
elif e.tag.endswith('line'):
x1 = float(e.get('x1'))
y1 = float(e.get('y1'))
x2 = float(e.get('x2'))
y2 = float(e.get('y2'))
self.new_path()
self.set_position(x1, y1)
self.line_to(x2, y2)
self.end_path()
elif e.tag.endswith('circle'):
cx = float(e.get('cx'))
cy = float(e.get('cy'))
r = float(e.get('r'))
self.new_path()
for i in range(self.circle_points):
theta = 2 * i * math.pi / self.circle_points
self.line_to(cx + r * math.cos(theta), cy + r * math.sin(theta))
self.close_path()
self.end_path()
elif e.tag.endswith('ellipse'):
cx = float(e.get('cx'))
cy = float(e.get('cy'))
rx = float(e.get('rx'))
ry = float(e.get('ry'))
self.new_path()
for i in range(self.circle_points):
theta = 2 * i * math.pi / self.circle_points
self.line_to(cx + rx * math.cos(theta), cy + ry * math.sin(theta))
self.close_path()
self.end_path()
elif e.tag.endswith('linearGradient'):
self.gradients[e.get('id')] = LinearGradient(e, self)
elif e.tag.endswith('radialGradient'):
self.gradients[e.get('id')] = RadialGradient(e, self)
        for c in e:
try:
self.parse_element(c)
except Exception as ex:
print(f'Exception while parsing element: {c}')
raise
self.transform = oldtransform
self.opacity = oldopacity
def new_path(self):
self.x = 0
self.y = 0
self.close_index = 0
self.path = []
self.loop = []
def close_path(self):
self.loop.append(self.loop[0][:])
self.path.append(self.loop)
self.loop = []
def set_position(self, x, y):
self.x = x
self.y = y
self.loop.append([x, y])
def arc_to(self, rx, ry, phi, large_arc, sweep, x, y):
# This function is made out of magical fairy dust
# http://www.w3.org/TR/2003/REC-SVG11-20030114/implnote.html#ArcImplementationNotes
x1 = self.x
y1 = self.y
x2 = x
y2 = y
cp = math.cos(phi)
sp = math.sin(phi)
dx = .5 * (x1 - x2)
dy = .5 * (y1 - y2)
x_ = cp * dx + sp * dy
y_ = -sp * dx + cp * dy
r2 = (((rx * ry) ** 2 - (rx * y_) ** 2 - (ry * x_) ** 2) /
((rx * y_) ** 2 + (ry * x_) ** 2))
if r2 < 0: r2 = 0
r = math.sqrt(r2)
if large_arc == sweep:
r = -r
cx_ = r * rx * y_ / ry
cy_ = -r * ry * x_ / rx
cx = cp * cx_ - sp * cy_ + .5 * (x1 + x2)
cy = sp * cx_ + cp * cy_ + .5 * (y1 + y2)
def angle(u, v):
a = math.acos((u[0] * v[0] + u[1] * v[1]) / math.sqrt((u[0] ** 2 + u[1] ** 2) * (v[0] ** 2 + v[1] ** 2)))
sgn = 1 if u[0] * v[1] > u[1] * v[0] else -1
return sgn * a
psi = angle((1, 0), ((x_ - cx_) / rx, (y_ - cy_) / ry))
delta = angle(((x_ - cx_) / rx, (y_ - cy_) / ry),
((-x_ - cx_) / rx, (-y_ - cy_) / ry))
if sweep and delta < 0: delta += math.pi * 2
if not sweep and delta > 0: delta -= math.pi * 2
n_points = max(int(abs(self.circle_points * delta / (2 * math.pi))), 1)
for i in range(n_points + 1):
theta = psi + i * delta / n_points
ct = math.cos(theta)
st = math.sin(theta)
self.line_to(cp * rx * ct - sp * ry * st + cx,
sp * rx * ct + cp * ry * st + cy)
def curve_to(self, x1, y1, x2, y2, x, y):
if not self.bezier_coefficients:
for i in range(self.bezier_points + 1):
t = float(i) / self.bezier_points
t0 = (1 - t) ** 3
t1 = 3 * t * (1 - t) ** 2
t2 = 3 * t ** 2 * (1 - t)
t3 = t ** 3
self.bezier_coefficients.append([t0, t1, t2, t3])
self.last_cx = x2
self.last_cy = y2
for i, t in enumerate(self.bezier_coefficients):
px = t[0] * self.x + t[1] * x1 + t[2] * x2 + t[3] * x
py = t[0] * self.y + t[1] * y1 + t[2] * y2 + t[3] * y
self.loop.append([px, py])
self.x, self.y = px, py
def line_to(self, x, y):
self.set_position(x, y)
def end_path(self):
self.path.append(self.loop)
if self.path:
path = []
for orig_loop in self.path:
if not orig_loop: continue
loop = [orig_loop[0]]
for pt in orig_loop:
if (pt[0] - loop[-1][0]) ** 2 + (pt[1] - loop[-1][1]) ** 2 > TOLERANCE:
loop.append(pt)
path.append(loop)
path_object = SvgPath(path if self.stroke else None, self.stroke,
self.triangulate(path) if self.fill else None, self.fill,
self.transform, self.path_id, self.path_title, self.path_description)
self.paths.append(path_object)
self.path_lookup[self.path_id] = path_object
self.path = []
def triangulate(self, looplist):
tlist = []
self.curr_shape = []
spareverts = []
@set_tess_callback(gl.GLU_TESS_VERTEX)
def vertexCallback(vertex):
vertex = cast(vertex, POINTER(gl.GLdouble))
self.curr_shape.append(list(vertex[0:2]))
@set_tess_callback(gl.GLU_TESS_BEGIN)
def beginCallback(which):
self.tess_style = which
@set_tess_callback(gl.GLU_TESS_END)
def endCallback():
if self.tess_style == gl.GL_TRIANGLE_FAN:
c = self.curr_shape.pop(0)
p1 = self.curr_shape.pop(0)
while self.curr_shape:
p2 = self.curr_shape.pop(0)
tlist.extend([c, p1, p2])
p1 = p2
elif self.tess_style == gl.GL_TRIANGLE_STRIP:
p1 = self.curr_shape.pop(0)
p2 = self.curr_shape.pop(0)
while self.curr_shape:
p3 = self.curr_shape.pop(0)
tlist.extend([p1, p2, p3])
p1 = p2
p2 = p3
elif self.tess_style == gl.GL_TRIANGLES:
tlist.extend(self.curr_shape)
else:
self.warn("Unrecognised tesselation style: %d" % (self.tess_style,))
self.tess_style = None
self.curr_shape = []
@set_tess_callback(gl.GLU_TESS_ERROR)
def errorCallback(code):
ptr = gl.gluErrorString(code)
err = ''
idx = 0
while ptr[idx]:
err += chr(ptr[idx])
idx += 1
self.warn("GLU Tesselation Error: " + err)
@set_tess_callback(gl.GLU_TESS_COMBINE)
def combineCallback(coords, vertex_data, weights, dataOut):
x, y, z = coords[0:3]
data = (gl.GLdouble * 3)(x, y, z)
dataOut[0] = cast(data, POINTER(gl.GLvoid))
spareverts.append(data)
data_lists = []
for vlist in looplist:
d_list = []
for x, y in vlist:
v_data = (gl.GLdouble * 3)(x, y, 0)
d_list.append(v_data)
data_lists.append(d_list)
gl.gluTessBeginPolygon(tess, None)
for d_list in data_lists:
gl.gluTessBeginContour(tess)
for v_data in d_list:
gl.gluTessVertex(tess, v_data, v_data)
gl.gluTessEndContour(tess)
gl.gluTessEndPolygon(tess)
return tlist
def warn(self, message):
print(f"Warning: SVG Parser ({self.filename}) - {message}")
|
from typing import List
class Solution:
def permute(self, nums: List[int]) -> List[List[int]]:
def permutations(nlen, nums, path):
            if not nums:
                res.append(path)
                return
for i in range(nlen):
permutations(nlen - 1, nums[:i] + nums[i + 1:], path + [nums[i]])
res = []
nlen = len(nums)
permutations(nlen, nums, [])
return res
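# A small usage sketch (not part of the judged class itself):
if __name__ == "__main__":
    print(Solution().permute([1, 2, 3]))
    # -> [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]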
|
# convert to a single pdf - using latex
import json
import os
import re
from utils import (load_scottish_psalter, load_sing_psalms, make_output_folder,
remove_folder, zip_folder)
def create_latex_body(psalms, toc_name, output_name):
body = u'''\\documentclass[11pt,a4paper]{report}
    \\setlength{\\parindent}{0pt}
\\usepackage{fixltx2e}
\\usepackage{hyperref}
\\usepackage[top=2cm, bottom=2cm, left=2.5cm, right=2.5cm]{geometry}
\\title{''' + toc_name + '''}
    \\renewcommand*\\contentsname{''' + toc_name + '''}
\\begin{document}
\\tableofcontents
\\pagebreak
\\pagestyle{empty}\n'''
for psalm in psalms:
body += "\\addcontentsline{toc}{section}{" + psalm['name'] + "}\n"
body += "\\section*{" + psalm['name'] + "}\n\n"
body += "\\textit{" + psalm['metre'] + "}\\\\\n\n"
for v in psalm['stanzas']:
# superscript verse #s
for ii in re.findall(r'\d+', v):
num = re.findall(r'\d+', ii)
v = v.replace(ii, '\\textsuperscript{' + num[0] + '}')
body += v.replace("\n", "\\\\") + "\\\\\n\n"
body += "\\pagebreak"
body += '\\end{document}'
for quote in re.findall(r'''\".+\"''', body):
body = body.replace(quote, "``" + quote.replace('"', '') + "''")
with open(output_name, 'w') as f:
f.write(body)
def convert2latex():
"""Convert both sets of Psalms to text files and save in output/plain_text"""
output_folder = make_output_folder(["tex"])
# sing psalms
psalms = load_sing_psalms()
create_latex_body(psalms, "Sing Psalms", os.path.join(output_folder, 'Sing Psalms.tex'))
# trad psalms
psalms = load_scottish_psalter()
create_latex_body(psalms, "Scottish Psalter", os.path.join(output_folder, 'Scottish Psalter.tex'))
zip_folder(output_folder)
remove_folder(output_folder)
if __name__ == '__main__':
convert2latex()
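# A hedged follow-up step: the script above only writes and zips .tex files.
# Producing the PDFs themselves needs a LaTeX toolchain, e.g. (assuming pdflatex
# is on PATH and the tex folder has been extracted back to 'output/tex'):
#
#   import subprocess
#   subprocess.run(['pdflatex', '-interaction=nonstopmode', 'Sing Psalms.tex'],
#                  cwd='output/tex', check=True)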
|
import torch
from torch import nn
from interpret.attr import GuidedBackProp
def test_guidedbackprop():
inp = torch.randn(1,50, requires_grad=True)
l1 = nn.Linear(50, 1)
model = nn.Sequential(nn.ReLU(), l1)
attr = GuidedBackProp(model, inp, target_class=0)
relu_inp = torch.relu(inp).detach().clone().requires_grad_(True)
l1(relu_inp).backward()
manual_guide = relu_inp.grad.clone()
manual_guide[inp<0] = 0
manual_guide[manual_guide<0] = 0
assert (manual_guide == inp.grad).all()
def test_deconvnet():
inp = torch.randn(1,50, requires_grad=True)
l1 = nn.Linear(50, 1)
model = nn.Sequential(nn.ReLU(), l1)
attr = GuidedBackProp(model, inp, target_class=0, deconvnet=True)
relu_inp = torch.relu(inp).detach().clone().requires_grad_(True)
l1(relu_inp).backward()
manual_guide = relu_inp.grad.clone()
manual_guide[manual_guide<0] = 0
assert (manual_guide == inp.grad).all()
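def test_guided_relu_rule_example():
    # A standalone illustration of the rule the tests above check: guided backprop
    # lets gradient flow only where both the forward input to the ReLU and the
    # incoming gradient are positive (plain torch, independent of interpret.attr).
    x = torch.tensor([-1.0, 2.0, 3.0])
    upstream = torch.tensor([0.5, -0.5, 1.0])
    guided = upstream.clone()
    guided[x < 0] = 0          # ReLU mask from the forward pass
    guided[guided < 0] = 0     # keep only positive incoming gradients
    assert torch.equal(guided, torch.tensor([0.0, 0.0, 1.0]))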
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
def cf_alerts(cli_ctx, *_):
from azure.cli.core.commands.client_factory import get_mgmt_service_client
    from azure.mgmt.alertsmanagement import AlertsManagementClient
    return get_mgmt_service_client(cli_ctx, AlertsManagementClient)
def cf_smart_detector_alert_rules(cli_ctx, *_):
return cf_alerts(cli_ctx).smart_detector_alert_rules
|
"""
Copyright 2013 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
from cloudcafe.identity.v2_0.common.models.base import BaseIdentityListModel
class Users(BaseIdentityListModel):
def __init__(self, users=None):
"""
Models a users list returned by keystone
"""
super(Users, self).__init__()
self.extend(users or [])
@classmethod
def _list_to_obj(cls, user_dict_list):
users = Users()
for user_dict in user_dict_list:
user = User._dict_to_obj(user_dict)
users.append(user)
return users
@classmethod
def _json_to_obj(cls, serialized_str):
json_dict = json.loads(serialized_str)
return cls._list_to_obj(json_dict.get('users'))
class User(BaseIdentityListModel):
def __init__(self, id_=None, name=None, tenant_id=None,
enabled=None, email=None):
"""
Models a user object returned by keystone
"""
super(User, self).__init__()
self.id_ = id_
self.name = name
self.tenant_id = tenant_id
self.enabled = enabled
self.email = email
def _obj_to_json(self):
json_dict = {"user": {"name": self.name,
"tenantId": self.tenant_id,
"enabled": self.enabled,
"email": self.email}}
return json.dumps(json_dict)
@classmethod
def _dict_to_obj(cls, json_dict):
user = User(id_=json_dict.get('id'),
name=json_dict.get('name'),
tenant_id=json_dict.get('tenantId'),
enabled=json_dict.get('enabled'),
email=json_dict.get('email'))
return user
@classmethod
def _json_to_obj(cls, serialized_str):
json_dict = json.loads(serialized_str)
return cls._dict_to_obj(json_dict.get('user'))
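# A small round-trip sketch using only the models above (the values are made up):
if __name__ == "__main__":
    payload = ('{"user": {"id": "42", "name": "alice", "tenantId": "t1", '
               '"enabled": true, "email": "alice@example.com"}}')
    user = User._json_to_obj(payload)
    assert user.name == "alice" and user.tenant_id == "t1"
    print(user._obj_to_json())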
|
def _v8_disable_pointer_compression(settings, attr):
return {
"//third_party/v8/HEAD:v8_enable_pointer_compression": "False",
}
v8_disable_pointer_compression = transition(
implementation = _v8_disable_pointer_compression,
inputs = [],
outputs = ["//third_party/v8/HEAD:v8_enable_pointer_compression"],
)
# The implementation of transition_rule: all this does is copy the
# cc_binary's output to its own output and propagate its runfiles
# and executable to use for "$ bazel run".
#
# This makes transition_rule as close to a pure wrapper of cc_binary
# as possible.
def _v8_binary_non_pointer_compression(ctx):
binary = ctx.attr.binary[0]
outfile = ctx.actions.declare_file(ctx.label.name)
cc_binary_outfile = binary[DefaultInfo].files.to_list()[0]
ctx.actions.run_shell(
inputs = [cc_binary_outfile],
outputs = [outfile],
command = "cp %s %s" % (cc_binary_outfile.path, outfile.path),
)
return [
DefaultInfo(
executable = outfile,
data_runfiles = binary[DefaultInfo].data_runfiles,
),
]
# The purpose of this rule is to transition to a config where
# v8_enable_pointer_compression is disabled, and to keep that setting in place
# for everything in the wrapped cc_binary's dependency graph.
v8_binary_non_pointer_compression = rule(
implementation = _v8_binary_non_pointer_compression,
attrs = {
# This is the cc_binary whose deps will select() on that feature.
        # Note specifically how it's configured with v8_disable_pointer_compression, which
# ensures that setting propagates down the graph.
"binary": attr.label(cfg = v8_disable_pointer_compression),
# This is a stock Bazel requirement for any rule that uses Starlark
# transitions. It's okay to copy the below verbatim for all such rules.
#
# The purpose of this requirement is to give the ability to restrict
# which packages can invoke these rules, since Starlark transitions
# make much larger graphs possible that can have memory and performance
# consequences for your build. The whitelist defaults to "everything".
# But you can redefine it more strictly if you feel that's prudent.
"_allowlist_function_transition": attr.label(
default = "//tools/allowlists/function_transition_allowlist",
),
},
# Making this executable means it works with "$ bazel run".
executable = True,
)
|
from .family import Family
from .likelihood import Likelihood
from .prior import Prior
from .scaler_mle import PriorScalerMLE
from .scaler_default import PriorScaler
__all__ = ["Family", "Likelihood", "Prior", "PriorScaler", "PriorScalerMLE"]
|
"""
=======================================================
Reading an inverse operator and view source space in 3D
=======================================================
"""
# Author: Alexandre Gramfort <gramfort@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
print(__doc__)
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator
data_path = sample.data_path()
fname = data_path
fname += '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
inv = read_inverse_operator(fname)
print("Method: %s" % inv['methods'])
print("fMRI prior: %s" % inv['fmri_prior'])
print("Number of sources: %s" % inv['nsource'])
print("Number of channels: %s" % inv['nchan'])
###############################################################################
# Show result on 3D source space
lh_points = inv['src'][0]['rr']
lh_faces = inv['src'][0]['use_tris']
rh_points = inv['src'][1]['rr']
rh_faces = inv['src'][1]['use_tris']
try:
from enthought.mayavi import mlab
except:  # noqa: E722 -- fall back to the standalone mayavi package
from mayavi import mlab
mlab.figure(size=(600, 600), bgcolor=(0, 0, 0))
mlab.triangular_mesh(lh_points[:, 0], lh_points[:, 1], lh_points[:, 2],
lh_faces)
mlab.triangular_mesh(rh_points[:, 0], rh_points[:, 1], rh_points[:, 2],
rh_faces)
|
import logging
from vcftoolbox import Genotype
from puzzle.models import Genotype as puzzle_genotype
logger = logging.getLogger(__name__)
class GenotypeExtras(object):
"""Class to store methods that deals with genotyping"""
def _add_genotype_calls(self, variant_obj, variant_line, case_obj):
"""Add the genotype calls for the variant
Args:
variant_obj (puzzle.models.Variant)
variant_dict (dict): A variant dictionary
case_obj (puzzle.models.Case)
"""
variant_line = variant_line.split('\t')
        # if there are genotype calls, there are individuals to add
if len(variant_line) > 8:
gt_format = variant_line[8].split(':')
for individual in case_obj.individuals:
sample_id = individual.ind_id
index = individual.ind_index
gt_call = variant_line[9+index].split(':')
raw_call = dict(zip(gt_format, gt_call))
genotype = Genotype(**raw_call)
variant_obj.add_individual(puzzle_genotype(
sample_id = sample_id,
genotype = genotype.genotype,
case_id = case_obj.name,
phenotype = individual.phenotype,
ref_depth = genotype.ref_depth,
alt_depth = genotype.alt_depth,
genotype_quality = genotype.genotype_quality,
depth = genotype.depth_of_coverage,
supporting_evidence = genotype.supporting_evidence,
pe_support = genotype.pe_support,
sr_support = genotype.sr_support,
))
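    # A minimal illustration of the FORMAT/sample pairing used above (the sample
    # VCF fields are made up; the resulting dict is what gets passed to
    # vcftoolbox.Genotype in the method above):
    #
    #   gt_format = 'GT:AD:DP:GQ'.split(':')
    #   gt_call = '0/1:10,12:22:60'.split(':')
    #   raw_call = dict(zip(gt_format, gt_call))
    #   # -> {'GT': '0/1', 'AD': '10,12', 'DP': '22', 'GQ': '60'}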
|
from django.db.models import Count, Avg, Sum, IntegerField, Case, When, Q, Min, FloatField, F
from django.db.models.functions import TruncDate
from django.contrib.admin.views.decorators import staff_member_required
from django.http import HttpResponse, JsonResponse
from django.utils.translation import ugettext as _
from django.utils import timezone
from dateutil.relativedelta import relativedelta
import unicodecsv as csv
from collections import Counter, OrderedDict
from bisect import bisect
from calendar import month_name
from datetime import datetime
from danceschool.core.models import Customer, Series, EventOccurrence, Registration, EventRegistration, DanceTypeLevel, Location, DanceRole, SeriesTeacher, Instructor
from danceschool.core.utils.requests import getDateTimeFromGet
from danceschool.core.utils.timezone import ensure_timezone
def getAveragesByClassType(startDate=None,endDate=None):
# If a date filter was passed in GET, then apply it
when_all = {
'classdescription__series__eventregistration__cancelled': False,
'classdescription__series__eventregistration__dropIn': False
}
timeFilters = {}
classFilters = {}
roleFilters = Q()
if startDate:
timeFilters['classdescription__series__startTime__gte'] = startDate
classFilters['startTime__gte'] = startDate
roleFilters = roleFilters & (Q(eventrole__event__startTime__gte=startDate) | Q(eventregistration__event__startTime__gte=startDate))
if endDate:
timeFilters['classdescription__series__startTime__lte'] = endDate
classFilters['startTime__lte'] = endDate
roleFilters = roleFilters & (Q(eventrole__event__startTime__lte=endDate) | Q(eventregistration__event__startTime__lte=endDate))
when_all.update(timeFilters)
role_list = DanceRole.objects.filter(roleFilters).distinct()
annotations = {'registrations': Sum(Case(When(Q(**when_all),then=1),output_field=IntegerField()))}
values_list = ['name', 'danceType__name','registrations']
for this_role in role_list:
annotations[this_role.pluralName] = Sum(Case(When(Q(Q(**when_all) & Q(classdescription__series__eventregistration__role=this_role)),then=1),output_field=IntegerField()))
values_list.append(this_role.pluralName)
registration_counts = list(DanceTypeLevel.objects.annotate(**annotations).values_list(*values_list))
class_counter = Counter([(x.classDescription.danceTypeLevel.name, x.classDescription.danceTypeLevel.danceType.name) for x in Series.objects.filter(**classFilters).distinct()])
results = {}
for list_item in registration_counts:
type_name = ' '.join((str(list_item[0]),str(list_item[1])))
results[type_name] = {
str(_('Registrations')): list_item[2],
}
m = 3
for this_role in role_list:
results[type_name][str(_('Total %s' % this_role.pluralName))] = list_item[m]
m += 1
for k,count in class_counter.items():
type_name = ' '.join((str(k[0]),str(k[1])))
results[type_name].update({
str(_('Series')): count
})
for k,v in results.items():
if results[k].get(str(_('Series'))):
results[k].update({
str(_('Average Registrations')): (results[k][str(_('Registrations'))] or 0) / float(results[k][str(_('Series'))]),
})
for this_role in role_list:
results[k][str(_('Average %s' % this_role.pluralName))] = (results[k][str(_('Total %s' % this_role.pluralName))] or 0) / float(results[k][str(_('Series'))])
return results
@staff_member_required
def AveragesByClassTypeJSON(request):
startDate = getDateTimeFromGet(request,'startDate')
endDate = getDateTimeFromGet(request,'endDate')
results = getAveragesByClassType(startDate,endDate)
# Needs to return a list, not a dict
# Also, filter out types with no series or registrations
# and sort descending
results_list = [dict({'type': k},**dict(v)) for k,v in results.items() if v.get(str(_('Series'))) or v.get(str(_('Registrations')))]
sorted_list = sorted(results_list, key=lambda k: k[str(_('Series'))],reverse=True)
return JsonResponse(sorted_list,safe=False)
@staff_member_required
def AveragesByClassTypeCSV(request):
# Create the HttpResponse object with the appropriate CSV header.
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="averagesByClassDescriptionType.csv"'
writer = csv.writer(response)
startDate = getDateTimeFromGet(request,'startDate')
endDate = getDateTimeFromGet(request,'endDate')
results = getAveragesByClassType(startDate,endDate)
role_names = [x.replace(str(_('Average ')),'') for x in results.keys() if x.startswith(str(_('Average ')))]
header_list = [str(_('Class Type')),str(_('Total Classes')),str(_('Total Students')),str(_('Avg. Students/Class'))]
for this_role in role_names:
header_list += [str(_('Total %s' % this_role)), str(_('Avg. %s/Class' % this_role))]
# Note: These are not translated because the chart Javascript looks for these keys
writer.writerow(header_list)
for key,value in results.items():
this_row = [
key,
value.get(str(_('Series')),0),
value.get(str(_('Registrations')),0),
value.get(str(_('Average Registrations')),None),
]
for this_role in role_names:
this_row += [
value.get(str(_('Total %s' % this_role)), 0),
value.get(str(_('Average %s' % this_role)), 0)
]
writer.writerow(this_row)
return response
def getClassTypeMonthlyData(year=None, series=None, typeLimit=None):
'''
To break out by class type and month simultaneously, get data for each
series and aggregate by class type.
'''
# If no year specified, report current year to date.
if not year:
year = timezone.now().year
role_list = DanceRole.objects.distinct()
# Report data on all students registered unless otherwise specified
if series not in ['registrations','studenthours'] and series not in [x.pluralName for x in role_list]:
series = 'registrations'
when_all = {
'eventregistration__dropIn': False,
'eventregistration__cancelled': False,
}
annotations = {'registrations': Sum(Case(When(Q(**when_all),then=1),output_field=FloatField()))}
for this_role in role_list:
annotations[this_role.pluralName] = Sum(Case(When(Q(Q(**when_all) & Q(eventregistration__role=this_role)),then=1),output_field=FloatField()))
series_counts = Series.objects.filter(year=year).annotate(**annotations).annotate(studenthours=F('duration') * F('registrations')).select_related('classDescription__danceTypeLevel__danceType','classDescription__danceTypeLevel')
# If no limit specified on number of types, then do not aggregate dance types.
# Otherwise, report the typeLimit most common types individually, and report all
# others as other. This gets tuples of names and counts
dance_type_counts = [(dance_type,count) for dance_type,count in Counter([x.classDescription.danceTypeLevel for x in series_counts]).items()]
dance_type_counts.sort(key=lambda k: k[1],reverse=True)
if typeLimit:
dance_types = [x[0] for x in dance_type_counts[:typeLimit]]
else:
dance_types = [x[0] for x in dance_type_counts]
results = []
# Month by month, calculate the result data
for month in range(1,13):
this_month_result = {
'month': month,
'month_name': month_name[month],
}
for dance_type in dance_types:
this_month_result[dance_type.__str__()] = \
series_counts.filter(classDescription__danceTypeLevel=dance_type,month=month).aggregate(Sum(series))['%s__sum' % series]
if typeLimit:
this_month_result['Other'] = \
series_counts.filter(month=month).exclude(classDescription__danceTypeLevel__in=dance_types).aggregate(Sum(series))['%s__sum' % series]
results.append(this_month_result)
# Now get totals
totals_result = {
'month': 'Totals',
'month_name': 'totals',
}
for dance_type in dance_types:
totals_result[dance_type.__str__()] = \
series_counts.filter(classDescription__danceTypeLevel=dance_type).aggregate(Sum(series))['%s__sum' % series]
if typeLimit:
totals_result['Other'] = \
series_counts.exclude(classDescription__danceTypeLevel__in=dance_types).aggregate(Sum(series))['%s__sum' % series]
results.append(totals_result)
return results
def ClassTypeMonthlyJSON(request):
try:
year = int(request.GET.get('year'))
except (ValueError, TypeError):
year = None
try:
typeLimit = int(request.GET.get('typeLimit'))
except (ValueError, TypeError):
typeLimit = None
series = request.GET.get('series')
results = getClassTypeMonthlyData(year=year,series=series,typeLimit=typeLimit)
return JsonResponse(results, safe=False)
def getClassCountHistogramData(cohortStart=None,cohortEnd=None):
# Note: Bins are inclusive, and 99999 means 'or more'. That should last us awhile.
bins = [
(1,1),
(2,2),
(3,3),
(4,4),
(5,5),
(6,6),
(7,7),
(8,8),
(9,9),
(10,15),
(16,20),
(21,99999)]
when_all = {
'eventregistration__dropIn': False,
'eventregistration__cancelled':False,
}
cohortFilters = {}
roleFilters = {}
if cohortStart:
cohortFilters['eventregistration__event__startTime__min__gte'] = cohortStart
roleFilters['eventregistration__event__startTime__gte'] = cohortStart
if cohortEnd:
cohortFilters['eventregistration__event__startTime__min__lte'] = cohortEnd
roleFilters['eventregistration__event__startTime__lte'] = cohortEnd
role_list = DanceRole.objects.filter(**roleFilters).distinct()
annotations = {
'eventregistration__event__startTime__min': Min('eventregistration__event__startTime'),
'registrations': Sum(Case(When(Q(**when_all),then=1),output_field=IntegerField())),
}
for this_role in role_list:
annotations[this_role.pluralName] = Sum(Case(When(Q(Q(**when_all) & Q(eventregistration__role=this_role)),then=1),output_field=IntegerField()))
customers = Customer.objects.annotate(**annotations).filter(**cohortFilters).distinct()
totalCustomers = customers.filter(registrations__gt=0).count()
totalClasses = [x.registrations for x in customers if x.registrations]
totalClasses.sort()
totalsByRole = {}
for this_role in role_list:
totalsByRole[this_role.pluralName] = {
'customers': customers.filter(**{this_role.pluralName + '__gt': 0}).count(),
'classes': [getattr(x,this_role.pluralName,None) for x in customers if getattr(x,this_role.pluralName,None)],
}
totalsByRole[this_role.pluralName]['classes'].sort()
results = {}
lastAll = 0
lastByRole = {this_role.pluralName:0 for this_role in role_list}
iByRole = {}
for this_bin in bins:
range_max = this_bin[1]
if this_bin[0] == this_bin[1]:
this_label = '%s' % this_bin[0]
elif this_bin[1] == 99999:
this_label = str(_('%s or more' % this_bin[0]))
else:
this_label = '%s-%s' % this_bin
i_all = bisect(totalClasses,range_max,lastAll)
iByRole = {
this_role.pluralName:bisect(totalsByRole[this_role.pluralName]['classes'],range_max,lastByRole[this_role.pluralName])
for this_role in role_list
}
# Note: These are not translated because the chart Javascript looks for these keys
results.update({
this_label:
{
str(_('# Students')): (i_all - lastAll),
str(_('Percentage')): 100 * (i_all - lastAll) / (float(totalCustomers) or 1),
'bin': this_bin,
},
})
for this_role in role_list:
results[this_label].update({
'# ' + this_role.pluralName: (iByRole[this_role.pluralName] - lastByRole[this_role.pluralName]),
'Percentage ' + this_role.pluralName: 100 * (
iByRole[this_role.pluralName] - lastByRole[this_role.pluralName]
) /
(float(totalsByRole[this_role.pluralName]['customers']) or 1),
})
lastAll = i_all
lastByRole = {this_role.pluralName:iByRole[this_role.pluralName] for this_role in role_list}
return results
@staff_member_required
def ClassCountHistogramJSON(request):
cohortStart = getDateTimeFromGet(request,'cohortStart')
cohortEnd = getDateTimeFromGet(request,'cohortEnd')
results = getClassCountHistogramData(cohortStart=cohortStart,cohortEnd=cohortEnd)
# Needs to return a sorted list, not a dict
results_list = [dict({'bin_label': k},**dict(v)) for k,v in results.items()]
sorted_list = sorted(results_list, key=lambda k: k['bin'][0])
return JsonResponse(sorted_list,safe=False)
@staff_member_required
def ClassCountHistogramCSV(request):
# Create the HttpResponse object with the appropriate CSV header.
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="studentHistogramData.csv"'
cohortStart = getDateTimeFromGet(request,'cohortStart')
cohortEnd = getDateTimeFromGet(request,'cohortEnd')
results = getClassCountHistogramData(cohortStart=cohortStart,cohortEnd=cohortEnd)
writer = csv.writer(response)
# Note: These are not translated because the chart Javascript looks for these keys
header_row = ['# of Classes']
keys = OrderedDict()
for v in results.values():
keys.update(v)
header_row += [x for x in keys.keys()]
writer.writerow(header_row)
for k,v in results.items():
this_row = [k]
this_row += [v.get(x,None) for x in keys.keys()]
writer.writerow(this_row)
return response
def getMonthlyPerformance():
'''
This function does the work of compiling monthly performance data
that can either be rendered as CSV or as JSON
'''
when_all = {
'eventregistration__dropIn': False,
'eventregistration__cancelled': False,
}
# Get objects at the Series level so that we can calculate StudentHours
series_counts = list(Series.objects.annotate(
eventregistrations=Sum(Case(When(Q(**when_all),then=1),output_field=IntegerField())),)
.values('year','month','eventregistrations','duration'))
for series in series_counts:
series['studenthours'] = (series.get('eventregistrations') or 0) * (series.get('duration') or 0)
all_years = set([x['year'] for x in series_counts])
dataseries_list = ['EventRegistrations', 'Registrations','Hours','StudentHours','AvgStudents']
yearTotals = {}
# Initialize dictionaries
for dataseries in dataseries_list:
yearTotals[dataseries] = {'MonthlyAverage': {}}
for year in all_years:
yearTotals[dataseries][year] = {}
# Fill in by year and month for a cleaner looping process
for year in all_years:
# Monthly Totals
for month in range(1,13):
# Total EventRegistrations per month is retrieved by the query above.
yearTotals['EventRegistrations'][year][month] = sum([x['eventregistrations'] or 0 for x in series_counts if x['month'] == month and x['year'] == year])
# Total Registrations per month and hours per month require a separate query for each month
yearTotals['Registrations'][year][month] = len(Registration.objects.filter(eventregistration__dropIn=False, eventregistration__cancelled=False,eventregistration__event__year=year,eventregistration__event__month=month).distinct())
yearTotals['Hours'][year][month] = sum([x['duration'] or 0 for x in series_counts if x['month'] == month and x['year'] == year])
yearTotals['StudentHours'][year][month] = sum([x['studenthours'] or 0 for x in series_counts if x['month'] == month and x['year'] == year])
if yearTotals['Hours'][year][month] > 0:
yearTotals['AvgStudents'][year][month] = yearTotals['StudentHours'][year][month] / float(yearTotals['Hours'][year][month])
else:
yearTotals['AvgStudents'][year][month] = 0
# Annual Totals
for sub_series in ['EventRegistrations','Registrations','Hours','StudentHours']:
yearTotals[sub_series][year]['Total'] = sum([x for x in yearTotals[sub_series][year].values()])
# Annual (Monthly) Averages
month_count = len([x for k,x in yearTotals['Hours'][year].items() if k in range(1,13) and x > 0])
if month_count > 0:
for sub_series in ['EventRegistrations','Registrations','Hours','StudentHours']:
yearTotals[sub_series][year]['Average'] = yearTotals[sub_series][year]['Total'] / float(month_count)
yearTotals['AvgStudents'][year]['Average'] = yearTotals['StudentHours'][year]['Total'] / float(yearTotals['Hours'][year]['Total'])
# Monthly Averages
for month in range(1,13):
yearly_hours_data = [x[month] for k,x in yearTotals['Hours'].items() if k in all_years and x[month] > 0]
yearly_studenthours_data = [x[month] for k,x in yearTotals['StudentHours'].items() if k in all_years and x[month] > 0]
yearly_eventregistrations_data = [x[month] for k,x in yearTotals['EventRegistrations'].items() if k in all_years and yearTotals['Hours'][k][month] > 0]
yearly_registrations_data = [x[month] for k,x in yearTotals['Registrations'].items() if k in all_years and yearTotals['Hours'][k][month] > 0]
year_count = len(yearly_hours_data)
if year_count > 0:
yearTotals['EventRegistrations']['MonthlyAverage'][month] = sum([x for x in yearly_eventregistrations_data]) / year_count
yearTotals['Registrations']['MonthlyAverage'][month] = sum([x for x in yearly_registrations_data]) / year_count
yearTotals['Hours']['MonthlyAverage'][month] = sum([x for x in yearly_hours_data]) / year_count
yearTotals['StudentHours']['MonthlyAverage'][month] = sum([x for x in yearly_studenthours_data]) / year_count
yearTotals['AvgStudents']['MonthlyAverage'][month] = yearTotals['StudentHours']['MonthlyAverage'][month] / float(yearTotals['Hours']['MonthlyAverage'][month])
return yearTotals
@staff_member_required
def MonthlyPerformanceJSON(request):
series = request.GET.get('series')
if series not in ['AvgStudents','Registrations','EventRegistrations','Hours','StudentHours']:
series = 'EventRegistrations'
yearTotals = getMonthlyPerformance()[series]
# Return JSON as lists, not as dictionaries, for c3.js
# yearTotals_list = [dict(v,**{'year':k}) for k, v in yearTotals.items()]
# Now make the lists so that there is one row per month, not one row per year,
    # to make things easier for working with c3.js.
monthTotals_list = []
years = list(set([k for k,v in yearTotals.items()]))
# Only include calendar months for graphing
for month in range(1,13):
this_month_data = {'month': month, 'month_name': month_name[month]}
for year in years:
this_month_data[year] = yearTotals[year].get(month)
monthTotals_list.append(this_month_data)
monthTotals_list_sorted = sorted(monthTotals_list, key=lambda k: k['month'])
return JsonResponse(monthTotals_list_sorted,safe=False)
@staff_member_required
def MonthlyPerformanceCSV(request):
# Create the HttpResponse object with the appropriate CSV header.
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="monthlyPerformance.csv"'
writer = csv.writer(response)
yearTotals = getMonthlyPerformance()
all_years = [k for k in yearTotals['Hours'].keys() if k != 'MonthlyAverage']
all_years.sort()
# Write headers first
headers_list = ['Data Series','Month','All-Time Avg.']
for year in all_years:
headers_list.append(str(year))
writer.writerow(headers_list)
# Note: These are not translated because the chart Javascript looks for these keys
yearTotals_keys = {
'Total Student-Hours': 'StudentHours',
'Avg. Students/Hour': 'AvgStudents',
'Hours of Instruction': 'Hours',
'Unique Registrations': 'Registrations',
'Total Students': 'EventRegistrations',
}
for series,key in yearTotals_keys.items():
for month in range(1,13):
this_row = [
series,
month_name[month],
yearTotals[key]['MonthlyAverage'][month],
]
for year in all_years:
this_row.append(yearTotals[key][year][month])
writer.writerow(this_row)
return response
def getLocationPerformance(startDate=None,endDate=None):
timeFilters = {}
if startDate:
timeFilters['event__startTime__gte'] = startDate
if endDate:
timeFilters['event__startTime__lte'] = endDate
seriesCounts = list(Location.objects.values_list('name').filter(**timeFilters).distinct().annotate(Count('event')))
timeFilters.update({
'event__eventregistration__dropIn':False,
'event__eventregistration__cancelled':False
})
eventRegistrationCounts = list(Location.objects.values_list('name').filter(**timeFilters).distinct().annotate(Count('event')))
results = {}
for list_item in seriesCounts:
results[list_item[0]] = {'series': list_item[1]}
for list_item in eventRegistrationCounts:
results[list_item[0]].update({'registrations': list_item[1]})
return results
@staff_member_required
def LocationPerformanceJSON(request):
startDate = getDateTimeFromGet(request,'startDate')
endDate = getDateTimeFromGet(request,'endDate')
results = getLocationPerformance(startDate,endDate)
# Needs to return a list, not a dict
results_list = [dict({'name': k},**dict(v)) for k,v in results.items()]
return JsonResponse(results_list,safe=False)
@staff_member_required
def LocationPerformanceCSV(request):
# Create the HttpResponse object with the appropriate CSV header.
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="locationPerformance.csv"'
startDate = getDateTimeFromGet(request,'startDate')
endDate = getDateTimeFromGet(request,'endDate')
results = getLocationPerformance(startDate,endDate)
writer = csv.writer(response)
# Note: These are not translated because the chart Javascript looks for these keys
writer.writerow(['Location','# Series','# Students','Avg. Students/Series'])
for location,data in results.items():
writer.writerow([
location, # The location name
data.get('series',0), # The num. of series taught there
data.get('registrations',0), # The num. of students taught there
float(data.get('registrations',0)) / data.get('series',1)
])
return response
def getRegistrationTypesAveragesByYear():
srs = EventRegistration.objects.all()
eligible_years = [x['event__year'] for x in srs.values('event__year').annotate(Count('event__year'))]
eligible_years.sort()
year_averages = []
for year in eligible_years:
this_year_results = srs.filter(event__year=year).annotate(
student=Case(When(registration__student=True,then=100),default=0,output_field=IntegerField()),
door=Case(When(registration__payAtDoor=False,then=100),default=0,output_field=IntegerField()),
droppedIn=Case(When(dropIn=True,then=100),default=0,output_field=IntegerField()),
cancellation=Case(When(cancelled=True,then=100),default=0,output_field=IntegerField()),
).aggregate(Student=Avg('student'),Door=Avg('door'),DropIn=Avg('droppedIn'),Cancelled=Avg('cancellation'),year=Min('event__year'))
year_averages.append(this_year_results)
return year_averages
@staff_member_required
def RegistrationTypeAveragesJSON(request):
results = getRegistrationTypesAveragesByYear()
return JsonResponse(results,safe=False)
def getRegistrationReferralCounts(startDate,endDate):
'''
When a user accesses the class registration page through a
referral URL, the marketing_id gets saved in the extra JSON
data associated with that registration. This just returns
counts associated with how often given referral terms appear
in a specified time window (i.e. how many people signed up
by clicking through a referral button).
'''
timeFilters = {}
if startDate:
timeFilters['dateTime__gte'] = startDate
if endDate:
timeFilters['dateTime__lt'] = endDate
regs = Registration.objects.filter(**timeFilters)
counter = Counter([x.data.get('marketing_id',None) for x in regs if isinstance(x.data,dict)] + [None for x in regs if not isinstance(x.data,dict)])
results = [{'code': k or _('None'), 'count': v} for k,v in counter.items()]
return results
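# A minimal sketch of the counting logic above on hand-made data (hypothetical
# values; in practice Registration.data is populated by the referral URL handling):
#   regs_data = [{'marketing_id': 'fb_ad'}, {'marketing_id': 'fb_ad'}, {}, None]
#   Counter([d.get('marketing_id') for d in regs_data if isinstance(d, dict)]
#           + [None for d in regs_data if not isinstance(d, dict)])
#   -> Counter({'fb_ad': 2, None: 2})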
@staff_member_required
def RegistrationReferralCountsJSON(request):
startDate = getDateTimeFromGet(request,'startDate')
endDate = getDateTimeFromGet(request,'endDate')
results = getRegistrationReferralCounts(startDate,endDate)
return JsonResponse(results,safe=False)
@staff_member_required
def MultiRegistrationJSON(request):
startDate = getDateTimeFromGet(request,'startDate')
endDate = getDateTimeFromGet(request,'endDate')
timeFilters = {}
if startDate:
timeFilters['dateTime__gte'] = startDate
if endDate:
timeFilters['dateTime__lte'] = endDate
er_counter_sorted = sorted(Counter(
Registration.objects.filter(**timeFilters).annotate(
er_count=Count('eventregistration')).values_list('er_count',flat=True)
).items())
results_list = []
cumulative = 0
total = sum([x[1] for x in er_counter_sorted])
for x in er_counter_sorted:
cumulative += x[1]
results_list.append({
'items': x[0], 'count': x[1], 'cumulative': cumulative,
'pct': 100 * (x[1] / total), 'cumulative_pct': 100 * (cumulative / total)
})
return JsonResponse(results_list,safe=False)
@staff_member_required
def RegistrationHoursJSON(request):
startDate = getDateTimeFromGet(request,'startDate')
endDate = getDateTimeFromGet(request,'endDate')
timeFilters = {}
if startDate:
timeFilters['dateTime__gte'] = startDate
if endDate:
timeFilters['dateTime__lte'] = endDate
hours_counter_sorted = sorted(Counter(
Registration.objects.filter(**timeFilters).annotate(
er_sum=Sum('eventregistration__event__duration')).values_list('er_sum',flat=True)
).items())
results_list = []
cumulative = 0
total = sum([x[1] for x in hours_counter_sorted])
for x in hours_counter_sorted:
cumulative += x[1]
results_list.append({
'hours': x[0], 'count': x[1], 'cumulative': cumulative,
'pct': 100 * (x[1] / total), 'cumulative_pct': 100 * (cumulative / total)
})
return JsonResponse(results_list,safe=False)
@staff_member_required
def AdvanceRegistrationDaysJSON(request):
startDate = getDateTimeFromGet(request,'startDate')
endDate = getDateTimeFromGet(request,'endDate')
timeFilters = {}
if startDate:
timeFilters['dateTime__gte'] = startDate
if endDate:
timeFilters['dateTime__lte'] = endDate
advance_days_sorted = sorted(Counter(
Registration.objects.filter(**timeFilters).annotate(
min_start=Min('eventregistration__event__startTime')
).annotate(
advance=(TruncDate('dateTime') - TruncDate('min_start'))
).values_list(
'advance',flat=True)
).items())
results_list = []
cumulative = 0
total = sum([x[1] for x in advance_days_sorted])
for x in advance_days_sorted:
cumulative += x[1]
results_list.append({
'days': x[0], 'count': x[1], 'cumulative': cumulative,
'pct': 100 * (x[1] / total), 'cumulative_pct': 100 * (cumulative / total)
})
return JsonResponse(results_list,safe=False)
@staff_member_required
def getGeneralStats(request):
# total number of students:
totalStudents = Customer.objects.distinct().count()
numSeries = Series.objects.distinct().count()
totalSeriesRegs = EventRegistration.objects.filter(**{'dropIn':False,'cancelled':False}).values('event','customer__user__email').distinct().count()
# time studio in existence:
firstClass = EventOccurrence.objects.order_by('startTime').values('startTime').first()
if firstClass:
firstStartTime = firstClass['startTime']
else:
firstStartTime = timezone.now()
timeDiff = relativedelta(timezone.now(),firstStartTime)
totalTime = '%s years, %s months, %s days' % (timeDiff.years, timeDiff.months,timeDiff.days)
return (totalStudents,numSeries,totalSeriesRegs,totalTime)
@staff_member_required
def getBestCustomersJSON(request):
bestCustomersLastTwelveMonths = Customer.objects.values(
'first_name','last_name'
).filter(**{
'eventregistration__registration__dateTime__gte': ensure_timezone(
datetime(timezone.now().year - 1,timezone.now().month,timezone.now().day)
),
'eventregistration__dropIn':False,'eventregistration__cancelled':False
}).annotate(Count('eventregistration')).order_by('-eventregistration__count')[:10]
bestCustomersAllTime = Customer.objects.values(
'first_name','last_name'
).filter(**{
'eventregistration__dropIn':False,
'eventregistration__cancelled':False
}).annotate(Count('eventregistration')).order_by('-eventregistration__count')[:10]
mostActiveTeachersThisYear = SeriesTeacher.objects.filter(
event__year=timezone.now().year
).exclude(
staffMember__instructor__status=Instructor.InstructorStatus.guest
).values_list(
'staffMember__firstName','staffMember__lastName'
).annotate(Count('staffMember')).order_by('-staffMember__count')
bestCustomerData = {
'bestCustomersLastTwelveMonths': list(bestCustomersLastTwelveMonths),
'bestCustomersAllTime': list(bestCustomersAllTime),
'mostActiveTeachersThisYear': list(mostActiveTeachersThisYear),
}
return JsonResponse(bestCustomerData)
|
#!/usr/bin/env python
# coding=utf-8
import re
import time
import subprocess as sp
from concurrent import futures
def exec_cmd(cmd):
process = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE)
try:
start_time = time.time()
time_out = 30
while True:
if process.poll() is not None:
break
time.sleep(1)
delta_time = time.time() - start_time
if delta_time > time_out:
process.terminate()
                print 'Command timed out'
return False
process.wait()
ret_code = process.returncode
err_msg = process.stderr.read().decode('utf-8')
if re.search('Connection refused|Permission denied', err_msg) is None:
if ret_code != 0:
                print 'return code:', ret_code, 'error message:', err_msg
return False
else:
                print 'Command succeeded'
return True
else:
            print 'return code:', ret_code, 'error message:', err_msg
return False
except Exception, e:
        print 'error message:', str(e)
return False
def callback_after(arg):
res = arg.result()
try:
if res:
            print 'callback_after received result:', res
        else:
            print 'callback_after received result:', res
except Exception, e:
print 'callback exception,', str(e)
return
def release(info_list):
# ret = list()
# task_list = list()
    executor = futures.ThreadPoolExecutor()
for info in info_list:
        executor.submit(exec_cmd, info['CMD']).add_done_callback(callback_after)
return
if __name__ == '__main__':
infos = [{
'CMD': '/usr/bin/ssh -l root 192.168.152.183 "touch /tmp/test.txt; echo `date` >> /tmp/test.txt"'
}]
release(infos)
|
#!/usr/bin/env python
import os
import sys
import joblib
import numpy as np
import tensorflow as tf
from tensorflow import keras
from scipy.io import loadmat
def create_model():
# define two sets of inputs
inputA = keras.layers.Input(shape=(5000,12))
inputB = keras.layers.Input(shape=(2,))
conv1 = keras.layers.Conv1D(filters=128, kernel_size=8,input_shape=(5000,12), padding='same')(inputA)
conv1 = keras.layers.BatchNormalization()(conv1)
conv1 = keras.layers.Activation(activation='relu')(conv1)
conv2 = keras.layers.Conv1D(filters=256, kernel_size=5, padding='same')(conv1)
conv2 = keras.layers.BatchNormalization()(conv2)
conv2 = keras.layers.Activation('relu')(conv2)
conv3 = keras.layers.Conv1D(128, kernel_size=3,padding='same')(conv2)
conv3 = keras.layers.BatchNormalization()(conv3)
conv3 = keras.layers.Activation('relu')(conv3)
gap_layer = keras.layers.GlobalAveragePooling1D()(conv3)
    output_layer = keras.layers.Dense(24, activation='sigmoid')(gap_layer)  # remember to set to 24
mod1 = keras.Model(inputs=inputA, outputs=output_layer)
#mod1 = keras.layers.add([mod1,mod1_shortcut])
    # the second branch operates on the second input
mod2 = keras.layers.Dense(100, activation="relu")(inputB) # 2 -> 100
mod2 = keras.layers.Dense(50, activation="relu")(mod2) # Added this layer
mod2 = keras.Model(inputs=inputB, outputs=mod2)
# combine the output of the two branches
combined = keras.layers.concatenate([mod1.output, mod2.output])
# apply a FC layer and then a regression prediction on the
# combined outputs
    z = keras.layers.Dense(24, activation="sigmoid")(combined)  # remember to set to 24
# our model will accept the inputs of the two branches and
# then output a single value
model = keras.Model(inputs=[mod1.input, mod2.input], outputs=z)
model.compile(loss=tf.keras.losses.BinaryCrossentropy(), optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), metrics=[tf.keras.metrics.BinaryAccuracy(
name='accuracy', dtype=None, threshold=0.5), tf.keras.metrics.AUC(num_thresholds=200, curve="ROC", summation_method="interpolation",
name="AUC", multi_label=True, label_weights=None)])
return model
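# A minimal smoke-test sketch (an assumption for illustration, not part of the
# original pipeline): feed random inputs with the shapes expected by the two
# branches and check that the combined model returns one score per class.
#   model = create_model()
#   sig = np.random.rand(1, 5000, 12).astype('float32')   # 12-lead signal branch
#   demo = np.asarray([[60, 1]], dtype='float32')          # age/gender branch
#   print(model.predict([sig, demo]).shape)                # -> (1, 24)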
def run_12ECG_classifier(data,header_data,loaded_model):
    # remember to uncomment the threshold
threshold = np.array([0.21551216, 0.20299779, 0.0955278 , 0.17289791, 0.18090656, 0.2227711 , 0.16741777, 0.22866722,
0.27118915, 0.23771854, 0.0912293 , 0.09410764, 0.20950935, 0.34517996, 0.02659288, 0.23399662, 0.15980351, 0.16177394,
0.20402484, 0.25333636, 0.25657814, 0.22106934, 0.45621441, 0.0743871])
# Use your classifier here to obtain a label and score for each class.
model = loaded_model
padded_signal = keras.preprocessing.sequence.pad_sequences(data, maxlen=5000, truncating='post',padding="post")
reshaped_signal = padded_signal.reshape(1,5000,12)
gender = header_data[14][6:-1]
age=header_data[13][6:-1]
    if gender in ("Male", "male", "M"):
        gender = 0
    elif gender in ("Female", "female", "F"):
        gender = 1
    elif gender == "NaN":
        gender = 2
    else:
        gender = 2  # assumption: treat any unrecognized value like "NaN"
# Age processing - replace with nicer code later
if age == "NaN":
age = -1
else:
age = int(age)
demo_data = np.asarray([age,gender])
reshaped_demo_data = demo_data.reshape(1,2)
combined_data = [reshaped_signal,reshaped_demo_data]
score = model.predict(combined_data)[0]
binary_prediction = score > threshold
binary_prediction = binary_prediction * 1
classes = ['10370003', '111975006', '164889003', '164890007', '164909002', '164917005',
'164934002', '164947007', '17338001', '251146004', '270492004', '39732003',
'426177001', '426627000', '426783006' ,'427084000' ,'427393009', '445118002',
'47665007' ,'59118001', '59931005', '63593006', '698252002', '713426002']
return binary_prediction, score, classes
def load_12ECG_model(model_input):
model = create_model()
f_out='model.h5'
filename = os.path.join(model_input,f_out)
model.load_weights(filename)
return model
|
from math import sqrt
class Numbers:
''' Contains frequently used methods for numbers '''
def isPrime(self, num):
if num <= 1:
return False
for divisor in range(2, int(sqrt(num)+1)):
if num%divisor == 0:
return False
return True
    def isArmstrong(self, num):
        ''' Check whether num equals the sum of its digits, each raised to
        the power of the digit count (an Armstrong/narcissistic number). '''
        power = len(str(num))
        total, num_clone = 0, num
        while num > 0:
            remainder = num % 10
            total += remainder ** power
            num = num // 10
        return num_clone == total
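# A small usage sketch (illustrative only, not part of the original module):
if __name__ == '__main__':
    nums = Numbers()
    print(nums.isPrime(13))       # True
    print(nums.isPrime(12))       # False
    print(nums.isArmstrong(153))  # True: 1**3 + 5**3 + 3**3 == 153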
|
from textwrap import dedent
import pytest
import env
from lightyear import LY
from lightyear.errors import UnsupportedCommaNesting
# SELECTORS
def test_simplest():
i = 'body\n width: 32px'
o = 'body{width:32px;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_type_sel():
i = dedent('''
body
color: #000000
''')
o = 'body{color:#000000;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_type_multiple_properties():
i = dedent('''
div
color: #000000
display: block
width: 32px
''')
o = 'div{color:#000000;display:block;width:32px;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_multiple_values():
i = dedent('''
div
color: #000000
border: 1px 2px 3px 4px
''')
o = 'div{color:#000000;border:1px 2px 3px 4px;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_universal_sel():
i = dedent('''
*
border: none
''')
o = '*{border:none;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_attribute_sel():
i = dedent('''
a[class=happy]
display: inline
''')
o = 'a[class=happy]{display:inline;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_id_sel():
i = dedent('''
#first
display: block
''')
o = '#first{display:block;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_id_type_sel():
i = dedent('''
a#first
display: block
''')
o = 'a#first{display:block;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_pseudo_class_param():
i = dedent('''
li:nth-child(2)
color: black
''')
o = 'li:nth-child(2){color:black;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_pseudo_class_param_expr():
i = dedent('''
i = 2
li:nth-child(i)
color: black
''')
o = 'li:nth-child(2){color:black;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_pseudo_class_noparam():
i = dedent('''
a:hover
color: white
''')
o = 'a:hover{color:white;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
# def test_pseudo_class_not():
# i = dedent('''
# p:not(#example)
# background-color: yellow
# ''')
# o = 'p:not(#example) {background-color: yellow;}'
# ly = LY()
# ly.eval(i)
# assert ly.css() == o
def test_parent_selector_a():
i = dedent('''
p
&#first
background-color: yellow
''')
o = 'p#first{background-color:yellow;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_parent_selector_b():
i = dedent('''
p
a
&#first
background-color: yellow
''')
o = 'p a#first{background-color:yellow;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_parent_selector_c():
i = dedent('''
p
a
&:hover
background-color: yellow
''')
o = 'p a:hover{background-color:yellow;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_parent_selector_comma_a():
i = dedent('''
p, h1
span a
color: #ffffff
''')
o = 'p span a,h1 span a{color:#ffffff;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_parent_selector_comma_b():
i = dedent('''
p
span, div
color: #ffffff
''')
o = 'p span,p div{color:#ffffff;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_parent_selector_comma_c():
i = dedent('''
p, div
span, div
color: #ffffff
''')
ly = LY()
with pytest.raises(UnsupportedCommaNesting):
ly.eval(i)
def test_parent_selector_comma_d():
i = dedent('''
#contact
div
a, p, i
color: #ffffff
''')
o = '#contact div a,#contact div p,#contact div i{color:#ffffff;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_multiple_scopes():
i = dedent('''
p
color: #FFFFFF
h2
color: #DDDDDD
''')
o = 'p{color:#ffffff;}h2{color:#dddddd;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
# NUMERIC
def test_addition():
i = dedent('''
li
width: 8px + 2
''')
o = 'li{width:10px;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_subtraction():
i = dedent('''
.button
left: 0px - 4px
''')
o = '.button{left:-4px;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_multiplication():
i = dedent('''
.button
width: 4em * 8
''')
o = '.button{width:32em;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_division_int():
i = dedent('''
div#first.button
width: 20px / 4
''')
o = 'div#first.button{width:5px;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_division_float_a():
i = dedent('''
#last
height: 10px / 4
''')
o = '#last{height:2.5px;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_division_float_b():
i = dedent('''
#last
height: 10px / 3
''')
o = '#last{height:3.33333px;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
# PROGRAMMATIC
def test_variable_a():
i = dedent('''
x = #000000
body
color: x
''')
o = 'body{color:#000000;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_variable_b():
i = dedent('''
x = 16px
body
border: x + 8px
''')
o = 'body{border:24px;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_mixin_decl_without_call():
i = dedent('''
mixin():
width: 10px
height: 20px
.example
width: 5px
''')
o = '.example{width:5px;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_mixin_a():
i = dedent('''
mixin():
width: 10px
height: 20px
.example
color: white
mixin()
''')
o = '.example{color:white;width:10px;height:20px;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_mixin_b():
i = dedent('''
mixin(x y):
width: x
height: y
.example
mixin(5px 10px)
''')
o = '.example{width:5px;height:10px;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_mixin_c():
i = dedent('''
mixin(x y):
width: x + 5
height: y + 5
.example
mixin(5px 10px)
''')
o = '.example{width:10px;height:15px;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_mixin_variable_a():
i = dedent('''
mixin():
width: 5px + x
x = 20px
.example
mixin()
''')
o = '.example{width:25px;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_mixin_variable_b():
i = dedent('''
mixin(w v):
width: w + v + x
x = 20px
.example
mixin(30px 50px)
''')
o = '.example{width:100px;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
# TAGS
def test_flag_property_a():
i = dedent('''
.test
(x) width: 50px
(y) width: 20px
(root.x)
''')
o = '.test{width:50px;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_flag_property_b():
i = dedent('''
.test
(x) width: 50px
(y) width: 20px
(root.y)
''')
o = '.test{width:20px;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_flag_property_c():
i = dedent('''
.test
(x) width: 50px
(y) height: 20px
(root.x)
(root.y)
''')
o = '.test{width:50px;}.test{height:20px;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_flag_property_d():
i = dedent('''
.testA
(x) width: 50px
.testB
(y) height: 20px
(root.x)
(root.y)
''')
o = '.testA{width:50px;}.testB{height:20px;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_flag_property_e():
i = dedent('''
.test
width: 20px
(desktop) width: 50px
(root)
(root.desktop) "@media screen and (min-width:970px)"
''')
o = '.test{width:20px;}@media screen and (min-width:970px){.test{width:50px;}}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_flag_property_f():
i = dedent('''
.test
color: #000000
(x) color: #FFFFFF
(root)
(root.x) "@media screen and (min-width:970px)"
''')
o = '.test{color:#000000;}@media screen and (min-width:970px){.test{color:#ffffff;}}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_flag_block_a():
i = dedent('''
.test
color: #000000
(x) .test
color: #FFFFFF
''')
o = '.test{color:#000000;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_flag_block_b():
i = dedent('''
.test
color: #000000
(x) .test
color: #FFFFFE
(root.x)
''')
o = '.test{color:#fffffe;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_flag_block_c():
i = dedent('''
.test
color: #000000
(x) .test
color: #FFFFFF
(root)
(root.x) "@media screen and (min-width:970px)"
''')
o = '.test{color:#000000;}@media screen and (min-width:970px){.test{color:#ffffff;}}'
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_multilevel_definitions():
i = dedent('''
p
width: 16px
a
color: red
''')
o = 'p{width:16px;}p a{color:red;}'
ly = LY()
ly.eval(i)
assert ly.css() == o
### AT-RULES ###
def test_font_face():
i = dedent('''
@font-face
font-family: Open Sans
src: url("fonts/OpenSans-Regular-webfont.eot")
font-weight: normal
font-weight: 400
font-style: normal
.opensans
font-family: Open Sans
''')
o = ('@font-face{'
'font-family:Open Sans;src:url("fonts/OpenSans-Regular-webfont.eot");'
'font-weight:normal;font-weight:400;font-style:normal;}'
'.opensans{font-family:Open Sans;}')
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_atrule_tag_a():
i = dedent('''
(d) @media screen and (min-width:970px)
body
width: 100%
(root.d)
''')
o = ('@media screen and (min-width:970px){body{width:100%;}}')
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_atrule_tag_b():
i = dedent('''
(d) @media screen and (min-width:970px)
body
width: 100%
(root)
''')
o = ''
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_atrule_tag_c():
i = dedent('''
(d) @media screen and (min-width:970px)
body
width: 100%
(root.m)
''')
o = ''
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_keyframe_a():
i = dedent('''
div
width: 100px
height: 100px
background: red
position: relative
animation: warpmove 5s infinite
@keyframes warpmove
0%
top: 0px
background: red
width: 100px
100%
top: 200px
background: yellow
width: 300px
''')
o = ('div{width:100px;height:100px;background:red;position:relative;animation:warpmove 5s infinite;}'
'@keyframes warpmove{0%{top:0px;background:red;width:100px;}'
'100%{top:200px;background:yellow;width:300px;}}')
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_keyframe_b():
i = dedent('''
div
width: 100px
height: 100px
background: red
position: relative
animation: warpmove 5s infinite
@keyframes warpmove
0%
top: 0px
background: red
width: 100px
(maybe) 50%
top: 170px
background: blue
width: 50px
100%
top: 200px
background: yellow
width: 300px
(root)
''')
o = ('div{width:100px;height:100px;background:red;position:relative;animation:warpmove 5s infinite;}'
'@keyframes warpmove{0%{top:0px;background:red;width:100px;}'
'100%{top:200px;background:yellow;width:300px;}}')
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_keyframe_c():
i = dedent('''
div
width: 100px
height: 100px
background: red
position: relative
animation: warpmove 5s infinite
@keyframes warpmove
(a) 0%
top: 0px
background: red
width: 100px
(b) 0%
top: 170px
background: blue
width: 50px
(a) 100%
top: 200px
background: yellow
width: 300px
(b) 100%
top: 300px
background: yellow
width: 500px
(root)
(root.a)
''')
o = ('div{width:100px;height:100px;background:red;position:relative;animation:warpmove 5s infinite;}'
'@keyframes warpmove{0%{top:0px;background:red;width:100px;}'
'100%{top:200px;background:yellow;width:300px;}}')
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_keyframe_d():
i = dedent('''
div
width: 100px
height: 100px
background: red
position: relative
animation: warpmove 5s infinite
@keyframes warpmove
(a) 0%
top: 0px
background: red
width: 100px
(b) 0%
top: 170px
background: blue
width: 50px
(a) 100%
top: 200px
background: yellow
width: 300px
(b) 100%
top: 300px
background: yellow
width: 500px
(root)
(root.b)
''')
o = ('div{width:100px;height:100px;background:red;position:relative;animation:warpmove 5s infinite;}'
'@keyframes warpmove{0%{top:170px;background:blue;width:50px;}'
'100%{top:300px;background:yellow;width:500px;}}')
ly = LY()
ly.eval(i)
assert ly.css() == o
def test_keyframe_e():
i = dedent('''
start = 0%
end = 100%
div
width: 100px
height: 100px
background: red
position: relative
animation: warpmove 5s infinite
@keyframes warpmove
start
top: 0px
background: red
width: 100px
end
top: 200px
background: yellow
width: 300px
''')
o = ('div{width:100px;height:100px;background:red;position:relative;animation:warpmove 5s infinite;}'
'@keyframes warpmove{0%{top:0px;background:red;width:100px;}'
'100%{top:200px;background:yellow;width:300px;}}')
ly = LY()
ly.eval(i)
assert ly.css() == o
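# The tests above all repeat the same eval-and-compare steps; a small helper like
# this (a sketch, not part of the original suite) could remove that duplication:
def _assert_css(source, expected):
    ly = LY()
    ly.eval(dedent(source))
    assert ly.css() == expected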
|
from tkinter import *
from .widgetBase import *
class ButtonOBJ(widget):
def setOnClick(self, addr):
self.attributes["onClick"] = addr
    def draw(self):
        try:
            # Wire the click handler by name if one was registered via setOnClick.
            obj = Button(self.window, text=self.attributes["text"],
                         command=getattr(self.winOBJ, self.attributes["onClick"]))
        except (KeyError, AttributeError):
            # Fall back to a plain button when no (valid) handler was set.
            obj = Button(self.window, text=self.attributes["text"])
        obj.pack()
        self.obj = obj
        return obj, self.window
|
# get input
lines = []
with open('input.txt') as file:
for line in file:
lines.append(line)
def getRow( rowString, rows ):
for rowHalf in rowString:
rows = halveRow( rowHalf, rows )
# print(rows)
return rows[0]
def halveRow( half, rows ):
if half == "F":
return rows[:int(len(rows)/2)]
else : return rows[int(len(rows)/2):]
def getCol( colString, cols ):
for colHalf in colString:
cols = halveCol( colHalf, cols )
# print(cols)
return cols[0]
def halveCol( half, cols ):
if half == "L":
return cols[:int(len(cols)/2)]
else : return cols[int(len(cols)/2):]
def getSeatID( passString, rows, cols ):
    passString = passString.strip()  # tolerate the trailing newline from the input file
    row = getRow( passString[:7], rows )
    col = getCol( passString[7:], cols )
    return 8 * row + col
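# Aside (an equivalent sketch, not used by the solution below): because F/L map to 0
# and B/R map to 1, a boarding pass is just a 10-bit binary number, so the seat ID
# can also be computed directly:
def seatIDBinary( passString ):
    return int(passString.strip().translate(str.maketrans('FBLR', '0101')), 2)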
rows = [ i for i in range(128)]
cols = [ i for i in range(8)]
# print(getRow( "FBFBBFF", rows ))
# print(getCol( "RLR", cols))
#print(getSeatID( "FBFBBFFRLR\n", rows, cols ))
seats=[]
for i in lines:
seats.append(getSeatID( i, rows, cols ))
print(max(seats))
seats.sort()
for i in range(len(seats) - 1):
    if seats[i+1] - seats[i] == 2:
        print(seats[i])
        print(seats[i+1])
        break
|
import os
import pathlib
import everett.manager
import everett.ext.yamlfile
default_config_yaml = './config.yaml'
if os.environ.get('MMBC_CONFIG_FILE') is not None:
config_yaml = os.environ.get('MMBC_CONFIG_FILE')
print(f'Using config file: {config_yaml}')
else:
config_yaml = default_config_yaml
# TODO do not have a default yaml file - just add ConfigYamlEnv if user provides it
manager = everett.manager.ConfigManager([
everett.manager.ConfigOSEnv(),
everett.ext.yamlfile.ConfigYamlEnv([config_yaml]),
everett.manager.ConfigDictEnv({
        # the db file path is relative to the working dir; the relative path is whatever follows the three initial slashes
'MMBC_DATABASE_URL': 'sqlite+pysqlite:///db/database.sqlite',
'MMBC_SLEEP': 1,
'MMBC_DRY_RUN': 'true',
'MMBC_MIN_PROFITABILITY_RATE': '0.001',
'MMBC_MAX_QTY_PER_ORDER': '0.007',
'MMBC_MAX_OPEN_ORDERS': 3,
'MMBC_MAX_OPEN_TAKER_ORDERS': 2,
        'MMBC_SHOULD_CANCEL_ORDER': 'false',  # order cancellation is off by default
'MMBC_CANCEL_ORDER_THRESHOLD': '0.00000001', # a very small number to indicate no cancel
'MMBC_EXCHANGE_BINANCE_LOOP_DELAY': 1,
'MMBC_EXCHANGE_BORDERLESS_LOOP_DELAY': 1,
'MMBC_EXCHANGE_BORDERLESS_PARTIAL_ORDER': 'false',
'MMBC_EXCHANGE_BORDERLESS_MAX_NRG_FEE_PER_TX': 20,
'MMBC_EXCHANGE_BORDERLESS_MAX_NRG_FEE_PER_DAY': 100,
'MMBC_EXCHANGE_BORDERLESS_MAX_NRG_ACCEPT_DAILY': 30000,
'MMBC_EXCHANGE_BORDERLESS_DEPOSIT_LENGTH': 100,
'MMBC_EXCHANGE_BORDERLESS_SETTLEMENT_WINDOW_LENGTH': 150,
'MMBC_EXCHANGE_DESTINATION_MINER_SCOOKIE': 'testCookie123',
})
])
config = manager.with_namespace('mmbc')
__all__ = ['config']
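# A minimal usage sketch (assumption: everett config managers are callable with a
# key and an optional parser; the keys below mirror the defaults defined above):
#   sleep_seconds = config('sleep', parser=int)   # resolves MMBC_SLEEP
#   dry_run = config('dry_run', parser=bool)      # resolves MMBC_DRY_RUN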
|
from typing import List, Tuple
from pyrep.objects.proximity_sensor import ProximitySensor
from pyrep.objects.shape import Shape
from pyrep.objects.object import Object
from rlbench.backend.task import Task
from rlbench.backend.conditions import DetectedCondition
class EmptyDishwasher(Task):
def init_task(self) -> None:
success_detector = ProximitySensor('success')
plate = Shape('dishwasher_plate')
self.register_graspable_objects([plate])
self.register_success_conditions(
[DetectedCondition(plate, success_detector, negated=True)])
def init_episode(self, index: int) -> List[str]:
return ['empty the dishwasher', 'take dishes out of dishwasher',
'open the dishwasher door, slide the rack out and remove the '
'dishes']
def variation_count(self) -> int:
return 1
def base_rotation_bounds(self) -> Tuple[List[float], List[float]]:
return [0, 0, -3.14 / 2.], [0, 0, 3.14 / 2.]
def boundary_root(self) -> Object:
return Shape('boundary_root')
|
import unittest
import smithwilson as sw
import numpy as np
class TestSmithWilson(unittest.TestCase):
def test_ufr_discount_factor(self):
"""Test creation of UFR discount factor vector"""
# Input
ufr = 0.029
t = np.array([0.25, 1.0, 5.0, 49.5, 125.0])
# Expected Output
expected = np.array([0.992878614, 0.971817298, 0.866808430, 0.242906395, 0.028059385])
# Actual Output
actual = sw.ufr_discount_factor(ufr=ufr, t=t)
# Assert
self.assertEqual(type(actual), type(expected), "Returned types not matching")
self.assertTupleEqual(actual.shape, expected.shape, "Shapes not matching")
np.testing.assert_almost_equal(actual, expected, decimal=8, err_msg="UFR discount factors not matching")
def test_calculate_prices(self):
"""Test calculation of zero-coupon bond price vector"""
# Input
r = np.array([0.02, 0.025, -0.033, 0.01, 0.0008])
t = np.array([0.25, 1.0, 5.0, 49.5, 125.0])
# Expected Output
expected = np.array([0.995061577, 0.975609756, 1.182681027, 0.611071456, 0.904873593])
# Actual Output
actual = sw.calculate_prices(rates=r, t=t)
# Assert
self.assertEqual(type(actual), type(expected), "Returned types not matching")
self.assertTupleEqual(actual.shape, expected.shape, "Shapes not matching")
np.testing.assert_almost_equal(actual, expected, decimal=8, err_msg="Prices not matching")
def test_wilson_function_symmetric(self):
"""Test creation of a symmetric Wilson-function matrix (t1 = t2)"""
# Input
t = np.array([0.25, 1.0, 5.0, 49.5, 125.0]).reshape((-1, 1))
ufr = 0.029
alpha = 0.2
# Expected Output
expected = np.array([[0.00238438, 0.00872884, 0.02719467, 0.01205822, 0.00139298],
[0.00872884, 0.03320614, 0.10608305, 0.04720974, 0.00545372],
[0.02719467, 0.10608305, 0.42652097, 0.2105409 , 0.02432211],
[0.01205822, 0.04720974, 0.2105409 , 0.55463306, 0.06747646],
[0.00139298, 0.00545372, 0.02432211, 0.06747646, 0.01928956]])
# Actual Output
actual = sw.wilson_function(t1=t, t2=t, ufr=ufr, alpha=alpha)
# Assert
self.assertEqual(type(actual), type(expected), "Returned types not matching")
self.assertTupleEqual(actual.shape, expected.shape, "Shapes not matching")
np.testing.assert_almost_equal(actual, expected, decimal=8, err_msg="Wilson functions not matching")
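    # Hedged reference (based on the standard EIOPA Smith-Wilson specification,
    # not on inspecting sw.wilson_function itself): the kernel checked here is
    # typically
    #   W(t1, t2) = exp(-ufr*(t1 + t2)) * (alpha*min(t1, t2)
    #               - 0.5*exp(-alpha*max(t1, t2))
    #                 * (exp(alpha*min(t1, t2)) - exp(-alpha*min(t1, t2))))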
def test_wilson_function_asymmetric_t1_lt_t2(self):
"""Test creation of a symmetric Wilson-function matrix (t1 != t2) with length of t1 > length of t2"""
# Input
t_obs = np.array([0.25, 1.0, 5.0, 49.5, 125.0]).reshape((-1, 1))
t_target = np.array([0.25, 0.5, 1.0, 2.0, 2.5, 3.5, 5.0, 10.0, 20.0, 49.5, 125.0]).reshape((-1, 1))
ufr = 0.029
alpha = 0.2
# Expected Output
expected = np.array([[0.00238438, 0.00872884, 0.02719467, 0.01205822, 0.00139298],
[0.00463874, 0.01723526, 0.0539627 , 0.0239447 , 0.00276612],
[0.00872884, 0.03320614, 0.10608305, 0.04720974, 0.00545372],
[0.015444 , 0.05969492, 0.20375322, 0.0917584 , 0.01060004],
[0.01817438, 0.07046799, 0.24880429, 0.11307011, 0.013062 ],
[0.02260267, 0.08794588, 0.33012767, 0.15383656, 0.01777143],
[0.02719467, 0.10608305, 0.42652097, 0.2105409 , 0.02432211],
[0.03225016, 0.12614043, 0.54769846, 0.36498556, 0.04216522],
[0.02751232, 0.10770227, 0.47881259, 0.54833094, 0.06336226],
[0.01205822, 0.04720974, 0.2105409 , 0.55463306, 0.06747646],
[0.00139298, 0.00545372, 0.02432211, 0.06747646, 0.01928956]])
# Actual Output
actual = sw.wilson_function(t1=t_target, t2=t_obs, ufr=ufr, alpha=alpha)
# Assert
self.assertEqual(type(actual), type(expected), "Returned types not matching")
self.assertTupleEqual(actual.shape, expected.shape, "Shapes not matching")
np.testing.assert_almost_equal(actual, expected, decimal=8, err_msg="Wilson functions not matching")
def test_wilson_function_asymmetric_t2_lt_t1(self):
"""Test creation of a symmetric Wilson-function matrix (t1 != t2) with length of t2 > length of t1"""
# Input
t_target = np.array([0.50, 1.5, 7.0, 22.5]).reshape((-1, 1))
t_obs = np.array([0.25, 1.0, 2.0, 2.5, 5.0, 10.0, 20.0]).reshape((-1, 1))
ufr = 0.032
alpha = 0.15
# Expected Output
expected = np.array([[0.00263839, 0.00990704, 0.01791847, 0.02129457, 0.03324991, 0.04184617, 0.03736174],
[0.00714378, 0.02751832, 0.05096578, 0.06087744, 0.09600535, 0.12138299, 0.1085669 ],
[0.01939785, 0.07563626, 0.14568738, 0.17843321, 0.31674624, 0.45088288, 0.42190812],
[0.01768861, 0.06909389, 0.13384921, 0.16464728, 0.3035725 , 0.51271549, 0.69668792]])
# Actual Output
actual = sw.wilson_function(t1=t_target, t2=t_obs, ufr=ufr, alpha=alpha)
# Assert
self.assertEqual(type(actual), type(expected), "Returned types not matching")
self.assertTupleEqual(actual.shape, expected.shape, "Shapes not matching")
np.testing.assert_almost_equal(actual, expected, decimal=8, err_msg="Wilson functions not matching")
def test_fit_parameters(self):
"""Test estimation of Smith-Wilson parameter vector ζ"""
# Input
r = np.array([0.02, 0.025, -0.033, 0.01, 0.0008]).reshape((-1, 1))
t = np.array([0.25, 1.0, 5.0, 49.5, 125.0]).reshape((-1, 1))
ufr = 0.029
alpha = 0.2
# Expected Output
expected = np.array([-42.78076209, 23.4627511, -3.96498616, 8.92604195, -75.22418515]).reshape((-1, 1))
# Actual Output
actual = sw.fit_parameters(rates=r, t=t, ufr=ufr, alpha=alpha)
# Assert
self.assertEqual(type(actual), type(expected), "Returned types not matching")
self.assertTupleEqual(actual.shape, expected.shape, "Shapes not matching")
np.testing.assert_almost_equal(actual, expected, decimal=8, err_msg="Parameter not matching")
def test_fit_smithwilson_rates_actual(self):
"""Test estimation of yield curve fitted with the Smith-Wilson algorithm.
This example uses an actual example from EIOPA. Deviations must be less than 1bps (0.01%).
Source: https://eiopa.europa.eu/Publications/Standards/EIOPA_RFR_20190531.zip
EIOPA_RFR_20190531_Term_Structures.xlsx; Tab: RFR_spot_no_VA; Switzerland
"""
# Input
r = np.array([-0.00803, -0.00814, -0.00778, -0.00725, -0.00652,
-0.00565, -0.0048, -0.00391, -0.00313, -0.00214,
-0.0014, -0.00067, -0.00008, 0.00051, 0.00108,
0.00157, 0.00197, 0.00228, 0.0025, 0.00264,
0.00271, 0.00274, 0.0028, 0.00291, 0.00309]).reshape((-1, 1))
t = np.array([float(y + 1) for y in range(len(r))]).reshape((-1, 1)) # 1.0, 2.0, ..., 25.0
ufr = 0.029
alpha = 0.128562
t_target = np.array([float(y + 1) for y in range(65)]).reshape((-1, 1))
# Expected Output
expected = np.array([-0.00803, -0.00814, -0.00778, -0.00725, -0.00652,
-0.00565, -0.0048, -0.00391, -0.00313, -0.00214,
-0.0014, -0.00067, -0.00008, 0.00051, 0.00108,
0.00157, 0.00197, 0.00228, 0.0025, 0.00264,
0.00271, 0.00274, 0.0028, 0.00291, 0.00309,
0.00337, 0.00372, 0.00412, 0.00455, 0.00501,
0.00548, 0.00596, 0.00644, 0.00692, 0.00739,
0.00786, 0.00831, 0.00876, 0.00919, 0.00961,
0.01002, 0.01042, 0.01081, 0.01118, 0.01154,
0.01189, 0.01223, 0.01255, 0.01287, 0.01318,
0.01347, 0.01376, 0.01403, 0.0143, 0.01456,
0.01481, 0.01505, 0.01528, 0.01551, 0.01573,
0.01594, 0.01615, 0.01635, 0.01655, 0.01673]).reshape((-1, 1))
# Actual Output
actual = sw.fit_smithwilson_rates(rates_obs=r, t_obs=t, t_target=t_target, ufr=ufr, alpha=alpha)
        # Assert - Precision of 4 decimal places equals a deviation of less than 1bps
self.assertEqual(type(actual), type(expected), "Returned types not matching")
self.assertTupleEqual(actual.shape, expected.shape, "Shapes not matching")
np.testing.assert_almost_equal(actual, expected, decimal=4, err_msg="Fitted rates not matching")
def test_fit_smithwilson_rates_random(self):
"""Test estimation of yield curve fitted with the Smith-Wilson algorithm
This test uses random data points.
"""
# Input
r = np.array([0.02, 0.025, -0.033, 0.01, 0.0008]).reshape((-1, 1))
t = np.array([0.25, 1.0, 5.0, 20.0, 25.0]).reshape((-1, 1))
ufr = 0.029
alpha = 0.12
t_target = np.array([0.25, 0.5, 1.0, 2.0, 2.5, 3.5, 5.0, 10.0, 20.0, 49.5, 125.0]).reshape((-1, 1))
# Expected Output
expected = np.array([0.02, 0.02417656, 0.025, 0.00361999, -0.00733027,
-0.02345319, -0.033, -0.01256218, 0.01, 0.00715949, 0.02015626]).reshape((-1, 1))
# Actual Output
actual = sw.fit_smithwilson_rates(rates_obs=r, t_obs=t, t_target=t_target, ufr=ufr, alpha=alpha)
# Assert
self.assertEqual(type(actual), type(expected), "Returned types not matching")
self.assertTupleEqual(actual.shape, expected.shape, "Shapes not matching")
np.testing.assert_almost_equal(actual, expected, decimal=8, err_msg="Fitted rates not matching")
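if __name__ == '__main__':
    # Allows running this test module directly; the original may rely on an
    # external test runner instead.
    unittest.main()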
|
from model.build_model import build_model
from utils.gpu import select_device
import torch
from torch.utils.data import DataLoader
import argparse
from utils.tools import *
from utils.model_info import get_model_info
from model.data_load.datasets import CocoDataset
from model.data_load import simple_collater, AspectRatioBasedSampler
from eval.coco_eval import COCO_Evaluater
from utils.config import cfg, load_config
from utils.visualize import *
class Trainer(object):
def __init__(self, args):
#----------- 1. get gpu info -----------------------------------------------
self.device = select_device(args.Schedule.device.gpus)
self.DP = False
self.dataset = args.Data.dataset_type
#----------- 2. get dataset ------------------------------------------
if self.dataset == 'coco':
self.dataset = CocoDataset(args.Data.test.dataset_path,
set_name='val2017',
pipeline=args.Data.test.pipeline
)
sampler = AspectRatioBasedSampler(self.dataset,
batch_size=args.Schedule.device.batch_size,
drop_last=False
)
self.dataloader = DataLoader(self.dataset,
num_workers=args.Schedule.device.num_workers,
batch_sampler=sampler,
collate_fn=simple_collater
)
#----------- 3. build model -----------------------------------------------
self.model = build_model(args).to(self.device)
self.model_info = get_model_info(self.model, args.Data.test.pipeline.input_size)
print("Model Summary: {}".format(self.model_info))
#------------4. build evaluator--------------------------------
self.evaluator = COCO_Evaluater(self.dataloader, self.device, args)
#------------5. resume training --------------------------------------
if args.Schedule.resume_path:
print('=> Loading model checkpoints from {}'.format(args.Schedule.resume_path))
chkpt = torch.load(args.Schedule.resume_path, map_location=self.device)
self.model.load_state_dict(chkpt['model'])
del chkpt
else:
            raise ValueError('Must have resume_path to load checkpoints to evaluate')
#-------------6. DP mode ------------------------------
if self.device and torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(self.model)
self.model = model.to(self.device)
self.DP = True
def validation(self):
aps = self.evaluator.evalute(self.model)
print(aps)
if __name__ == "__main__":
import sys
# sys.argv = ['train.py', '--b', '40', '--device', '0' ]
default_config_parser = parser = argparse.ArgumentParser(description= 'General Detection config parser')
parser.add_argument('--config', type=str, default='./results/Resnet50_lr0.01_atss/experiments.yaml', help="train config file path")
parser.add_argument('--model_path', type=str, default='./results/Resnet50_lr0.01_atss/backup_epoch29.pt', help="model checkpoints")
parser.add_argument('--batch_size', type=int, default=1, help="batchsize")
opt = parser.parse_args()
load_config(cfg, opt.config, save=False)
cfg.defrost()
cfg.Schedule.resume_path = opt.model_path
cfg.Schedule.device.batch_size = opt.batch_size
cfg.freeze()
Trainer(cfg).validation()
|
# https://codeforces.com/problemset/problem/405/A
n = int(input())
cubes = [int(x) for x in input().split()]
cubes.sort()
cubes = [str(x) for x in cubes]
print(' '.join(cubes))
|