| max_stars_repo_path (string, length 3-269) | max_stars_repo_name (string, length 4-119) | max_stars_count (int64, 0-191k) | id (string, length 1-7) | content (string, length 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
NeuralNet/QuadraticApproximator.py | AlexPetrusca/machine-learning | 0 | 12777751 |
<reponame>AlexPetrusca/machine-learning<filename>NeuralNet/QuadraticApproximator.py
from NeuralNet import NeuralNet
import numpy as np
from matplotlib import pyplot as plt
def f(x):
return x**2
net = NeuralNet(2, [80, 40], 1)
net2 = NeuralNet(2, [80], 1)
learnRange = 50
np.random.seed(2)
for i in range(100000):
x = (learnRange * np.random.rand(1) - learnRange/2).squeeze()
y = (learnRange * np.random.rand(1) - learnRange/2).squeeze()
fx = f(x)
expected = 1 if (y > fx) else 0
net.feed_forward(np.array([[x, y]]))
net.back_propagate(np.array([[expected]]))
print()
print(net.feed_forward(np.array([[2, 20]])))  # 1
print(net.feed_forward(np.array([[2, 4.3]])))  # 1
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
x = np.linspace(-4, 4, 1000)
y = f(x)
ax1.plot(x, y)
for i in range(1, 1000):
x = (32 * np.random.rand(1) - 16).squeeze()
y = (32 * np.random.rand(1) - 16).squeeze()
result = net.feed_forward(np.array([[x, y]]))
if result > 0.007:
ax1.scatter(x, y, s=10, color='green')
else:
ax1.scatter(x, y, s=10, color="red")
learnRange2 = 3
for i in range(100000):
x = (learnRange2 * np.random.rand(1) - learnRange2/2).squeeze()
y = (learnRange2 * np.random.rand(1) - learnRange2/2).squeeze()
fx = f(x)
expected = 1 if (y > fx) else 0
net2.feed_forward(np.array([[x, y]]))
net2.back_propagate(np.array([[expected]]))
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
x = np.linspace(-1, 1, 1000)
y = f(x)
ax2.plot(x, y)
for i in range(1, 1000):
x = (2 * np.random.rand(1) - 1).squeeze()
y = (2 * np.random.rand(1) - 1).squeeze()
result = net2.feed_forward(np.array([[x, y]]))
if result > 0.7:
ax2.scatter(x, y, s=10, color='green')
else:
ax2.scatter(x, y, s=10, color="red")
plt.show()
| 3.359375 | 3 |
backend/ai4all_api/urls.py | kevromster/ai4all | 0 | 12777752 |
from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
from rest_framework.schemas import get_schema_view
from ai4all_api import views
schema_view = get_schema_view(title='AI4All backend API')
# Create a router and register our viewsets with it.
router = DefaultRouter()
router.register(r'submit-camera-items', views.SubmitCameraItemViewSet)
# The API URLs are now determined automatically by the router.
# Additionally, we include the login URLs for the browsable API.
urlpatterns = [
url(r'^schema/$', schema_view),
url(r'^api/', include(router.urls)),
]
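# A minimal sketch of the routes this configuration is expected to expose,
# assuming the standard conventions of DRF's DefaultRouter:
#   /schema/                         -> API schema view
#   /api/                            -> browsable API root (provided by DefaultRouter)
#   /api/submit-camera-items/        -> SubmitCameraItemViewSet (list/create)
#   /api/submit-camera-items/{pk}/   -> SubmitCameraItemViewSet (retrieve/update/destroy)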
| 2.09375 | 2 |
anadroid/testing_framework/work/WorkLoad.py | greensoftwarelab/PyAnaDroid | 0 | 12777753 |
<filename>anadroid/testing_framework/work/WorkLoad.py
from anadroid.testing_framework.work.AbstractWorkLoad import AbstractWorkLoad
class WorkLoad(AbstractWorkLoad):
def __init__(self):
"""implements Workload functionality by providing a naive way to store work unit to be executed in FIFO order."""
super(WorkLoad, self).__init__()
self.work_units = []
self.w_index = 0
def add_unit(self, wunit):
self.work_units.append(wunit)
def consume(self):
if len(self.work_units) > self.w_index:
val = self.work_units[self.w_index]
self.w_index += 1
return val
else:
return None
def flush(self):
self.work_units.clear()
self.w_index = 0
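# A minimal usage sketch of the class above (the string units are illustrative
# stand-ins for real work units):
#   wl = WorkLoad()
#   wl.add_unit("unit_a")
#   wl.add_unit("unit_b")
#   wl.consume()   # -> "unit_a" (FIFO order)
#   wl.consume()   # -> "unit_b"
#   wl.consume()   # -> None once all stored units have been consumed
#   wl.flush()     # clears the stored units and resets the consume index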
| 2.6875 | 3 |
mwt/mwt_ns.py | JinY0ung-Shin/PDNO | 2 | 12777754 |
<gh_stars>1-10
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from typing import List, Tuple
import numpy as np
import math
import os
import h5py
from functools import partial
from .models.utils import train, test, LpLoss, get_filter, UnitGaussianNormalizer
import argparse
from tqdm import tqdm
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_initializer(name):
if name == 'xavier_normal':
init_ = partial(nn.init.xavier_normal_)
elif name == 'kaiming_uniform':
init_ = partial(nn.init.kaiming_uniform_)
elif name == 'kaiming_normal':
init_ = partial(nn.init.kaiming_normal_)
return init_
class sparseKernel(nn.Module):
def __init__(self,
k, alpha, c=1,
nl = 1,
initializer = None,
**kwargs):
super(sparseKernel,self).__init__()
self.k = k
self.conv = self.convBlock(alpha*k**2, alpha*k**2)
self.Lo = nn.Conv1d(alpha*k**2, c*k**2, 1)
def forward(self, x):
B, c, ich, Nx, Ny, T = x.shape # (B, c, ich, Nx, Ny, T)
x = x.reshape(B, -1, Nx, Ny, T)
x = self.conv(x)
x = self.Lo(x.view(B, c*ich, -1)).view(B, c, ich, Nx, Ny, T)
return x
def convBlock(self, ich, och):
net = nn.Sequential(
nn.Conv3d(och, och, 3, 1, 1),
nn.ReLU(inplace=True),
)
return net
def compl_mul3d(a, b):
# (batch, in_channel, x,y,t ), (in_channel, out_channel, x,y,t) -> (batch, out_channel, x,y,t)
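# The two stacked slices implement complex multiplication on (real, imag) pairs:
# for a = ar + i*ai and b = br + i*bi, the product is (ar*br - ai*bi) + i*(ar*bi + ai*br),
# which matches the real part (index 0) and imaginary part (index 1) built below.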
op = partial(torch.einsum, "bixyz,ioxyz->boxyz")
return torch.stack([
op(a[..., 0], b[..., 0]) - op(a[..., 1], b[..., 1]),
op(a[..., 1], b[..., 0]) + op(a[..., 0], b[..., 1])
], dim=-1)
# fft conv taken from: https://github.com/zongyi-li/fourier_neural_operator
class sparseKernelFT(nn.Module):
def __init__(self,
k, alpha, c=1,
nl = 1,
initializer = None,
**kwargs):
super(sparseKernelFT, self).__init__()
self.modes = alpha
self.weights1 = nn.Parameter(torch.zeros(c*k**2, c*k**2, self.modes, self.modes, self.modes, 2))
self.weights2 = nn.Parameter(torch.zeros(c*k**2, c*k**2, self.modes, self.modes, self.modes, 2))
self.weights3 = nn.Parameter(torch.zeros(c*k**2, c*k**2, self.modes, self.modes, self.modes, 2))
self.weights4 = nn.Parameter(torch.zeros(c*k**2, c*k**2, self.modes, self.modes, self.modes, 2))
nn.init.xavier_normal_(self.weights1)
nn.init.xavier_normal_(self.weights2)
nn.init.xavier_normal_(self.weights3)
nn.init.xavier_normal_(self.weights4)
self.Lo = nn.Conv1d(c*k**2, c*k**2, 1)
# self.Wo = nn.Conv1d(c*k**2, c*k**2, 1)
self.k = k
def forward(self, x):
B, c, ich, Nx, Ny, T = x.shape # (B, c, ich, N, N, T)
x = x.reshape(B, -1, Nx, Ny, T)
x_fft = torch.fft.rfftn(x, dim=[-3, -2, -1])
x_fft = torch.stack([x_fft.real, x_fft.imag], dim=-1)
# Multiply relevant Fourier modes
l1 = min(self.modes, Nx//2+1)
l2 = min(self.modes, Ny//2+1)
l3 = min(self.modes, T//2+1)
out_ft = torch.zeros(B, c*ich, Nx, Ny, T//2 +1, 2, device=x.device)
out_ft[:, :, :l1, :l2, :l3] = compl_mul3d(
x_fft[:, :, :l1, :l2, :l3], self.weights1[:, :, :l1, :l2, :l3])
out_ft[:, :, -l1:, :l2, :l3] = compl_mul3d(
x_fft[:, :, -l1:, :l2, :l3], self.weights2[:, :, :l1, :l2, :l3])
out_ft[:, :, :l1, -l2:, :l3] = compl_mul3d(
x_fft[:, :, :l1, -l2:, :l3], self.weights3[:, :, :l1, :l2, :l3])
out_ft[:, :, -l1:, -l2:, :l3] = compl_mul3d(
x_fft[:, :, -l1:, -l2:, :l3], self.weights4[:, :, :l1, :l2, :l3])
#Return to physical space
out_ft = torch.complex(out_ft[...,0], out_ft[..., 1])
x = torch.fft.irfftn(out_ft, s= (Nx, Ny, T))
x = F.relu(x)
x = self.Lo(x.view(B, c*ich, -1)).view(B, c, ich, Nx, Ny, T)
return x
class MWT_CZ(nn.Module):
def __init__(self,
k = 3, alpha = 5,
L = 0, c = 1,
base = 'legendre',
initializer = None,
**kwargs):
super(MWT_CZ, self).__init__()
self.k = k
self.L = L
H0, H1, G0, G1, PHI0, PHI1 = get_filter(base, k)
H0r = H0@PHI0
G0r = G0@PHI0
H1r = H1@PHI1
G1r = G1@PHI1
H0r[np.abs(H0r)<1e-8]=0
H1r[np.abs(H1r)<1e-8]=0
G0r[np.abs(G0r)<1e-8]=0
G1r[np.abs(G1r)<1e-8]=0
self.A = sparseKernelFT(k, alpha, c)
self.B = sparseKernelFT(k, alpha, c)
self.C = sparseKernelFT(k, alpha, c)
self.T0 = nn.Conv1d(c*k**2, c*k**2, 1)
if initializer is not None:
self.reset_parameters(initializer)
self.register_buffer('ec_s', torch.Tensor(
np.concatenate((np.kron(H0, H0).T,
np.kron(H0, H1).T,
np.kron(H1, H0).T,
np.kron(H1, H1).T,
), axis=0)))
self.register_buffer('ec_d', torch.Tensor(
np.concatenate((np.kron(G0, G0).T,
np.kron(G0, G1).T,
np.kron(G1, G0).T,
np.kron(G1, G1).T,
), axis=0)))
self.register_buffer('rc_ee', torch.Tensor(
np.concatenate((np.kron(H0r, H0r),
np.kron(G0r, G0r),
), axis=0)))
self.register_buffer('rc_eo', torch.Tensor(
np.concatenate((np.kron(H0r, H1r),
np.kron(G0r, G1r),
), axis=0)))
self.register_buffer('rc_oe', torch.Tensor(
np.concatenate((np.kron(H1r, H0r),
np.kron(G1r, G0r),
), axis=0)))
self.register_buffer('rc_oo', torch.Tensor(
np.concatenate((np.kron(H1r, H1r),
np.kron(G1r, G1r),
), axis=0)))
def forward(self, x):
B, c, ich, Nx, Ny, T = x.shape # (B, c, k^2, Nx, Ny, T)
ns = math.floor(np.log2(Nx))
Ud = torch.jit.annotate(List[Tensor], [])
Us = torch.jit.annotate(List[Tensor], [])
# decompose
for i in range(ns-self.L):
d, x = self.wavelet_transform(x)
Ud += [self.A(d) + self.B(x)]
Us += [self.C(d)]
x = self.T0(x.reshape(B, c*ich, -1)).view(
B, c, ich, 2**self.L, 2**self.L, T) # coarsest scale transform
# reconstruct
for i in range(ns-1-self.L,-1,-1):
x = x + Us[i]
x = torch.cat((x, Ud[i]), 2)
x = self.evenOdd(x)
return x
def wavelet_transform(self, x):
xa = torch.cat([x[:, :, :, ::2, ::2, :],
x[:, :, :, ::2, 1::2, :],
x[:, :, :, 1::2, ::2, :],
x[:, :, :, 1::2, 1::2, :]
], 2)
waveFil = partial(torch.einsum, 'bcixyt,io->bcoxyt')
d = waveFil(xa, self.ec_d)
s = waveFil(xa, self.ec_s)
return d, s
def evenOdd(self, x):
B, c, ich, Nx, Ny, T = x.shape # (B, c, 2*k^2, Nx, Ny)
assert ich == 2*self.k**2
evOd = partial(torch.einsum, 'bcixyt,io->bcoxyt')
x_ee = evOd(x, self.rc_ee)
x_eo = evOd(x, self.rc_eo)
x_oe = evOd(x, self.rc_oe)
x_oo = evOd(x, self.rc_oo)
x = torch.zeros(B, c, self.k**2, Nx*2, Ny*2, T,
device = x.device)
x[:, :, :, ::2, ::2, :] = x_ee
x[:, :, :, ::2, 1::2, :] = x_eo
x[:, :, :, 1::2, ::2, :] = x_oe
x[:, :, :, 1::2, 1::2, :] = x_oo
return x
def reset_parameters(self, initializer):
initializer(self.T0.weight)
class MWT(nn.Module):
def __init__(self,
ich = 1, k = 3, alpha = 2, c = 1,
nCZ = 3,
L = 0,
base = 'legendre',
initializer = None,
**kwargs):
super(MWT,self).__init__()
self.k = k
self.c = c
self.L = L
self.nCZ = nCZ
self.Lk = nn.Linear(ich, c*k**2)
self.MWT_CZ = nn.ModuleList(
[MWT_CZ(k, alpha, L, c, base,
initializer) for _ in range(nCZ)]
)
self.BN = nn.ModuleList(
[nn.BatchNorm3d(c*k**2) for _ in range(nCZ)]
)
self.Lc0 = nn.Linear(c*k**2, 128)
self.Lc1 = nn.Linear(128, 1)
if initializer is not None:
self.reset_parameters(initializer)
def forward(self, x):
B, Nx, Ny, T, ich = x.shape # (B, Nx, Ny, T, d)
ns = math.floor(np.log2(Nx))
x = self.Lk(x)
x = x.view(B, Nx, Ny, T, self.c, self.k**2)
x = x.permute(0, 4, 5, 1, 2, 3)
for i in range(self.nCZ):
x = self.MWT_CZ[i](x)
x = self.BN[i](x.view(B, -1, Nx, Ny, T)).view(
B, self.c, self.k**2, Nx, Ny, T)
if i < self.nCZ-1:
x = F.relu(x)
x = x.view(B, -1, Nx, Ny, T) # collapse c and k**2
x = x.permute(0, 2, 3, 4, 1)
x = self.Lc0(x)
x = F.relu(x)
x = self.Lc1(x)
return x.squeeze()
def reset_parameters(self, initializer):
initializer(self.Lc0.weight)
initializer(self.Lc1.weight)
def get_args(argv=None):
parser = argparse.ArgumentParser(description = 'Put your hyperparameters')
parser.add_argument('name', type=str, help='experiments name')
parser.add_argument('--batch', default=10, type=int, help = 'batch size')
parser.add_argument('--epochs', default=500, type=int, help = 'Number of Epochs')
parser.add_argument('--lr', default=1e-3, type=float, help='learning rate')
parser.add_argument('--wd', default=1e-4, type=float, help='weight decay')
parser.add_argument('--step_size', default=100, type=int, help='scheduler step size')
parser.add_argument('--gamma', default=0.5, type=float, help='scheduler factor')
parser.add_argument('--multgpu', action='store_true', help='whether multiple gpu or not')
parser.add_argument('--nu', default=1e-5, type=float, help='vorticity in NS equation')
parser.add_argument('--num_data', default=1000, type=int, help='number of data to use, only for which nu=1e-4')
return parser.parse_args(argv)
if __name__=='__main__':
args = get_args()
print(args)
nu = args.nu
S=64
sub=1
if nu==1e-5:
T_in = 10
T = 10
ntrain = 1000
ntest = 200
u = torch.load('../data/ns_V1e-5_N1200_T20.bin')
elif nu==1e-3:
T_in = 10
T = 40
ntrain = 1000
ntest = 200
u = torch.load('../data/ns_V1e-3_N5000_T50.bin')
elif nu==1e-4:
T_in = 10
T = 20
ntrain = args.num_data
ntest = 200
u = torch.load('../data/ns_V1e-4_N10000_T30.bin')
train_a = u[:ntrain,:,:,:T_in]
train_u = u[:ntrain,:,:,T_in:T+T_in]
test_a= u[-ntest:,:,:,:T_in]
test_u = u[-ntest:,:,:,T_in:T+T_in]
a_normalizer = UnitGaussianNormalizer(train_a)
x_train = a_normalizer.encode(train_a)
x_test = a_normalizer.encode(test_a)
y_normalizer = UnitGaussianNormalizer(train_u)
y_train = y_normalizer.encode(train_u)
x_train = x_train.reshape(ntrain,S,S,1,T_in).repeat([1,1,1,T,1])
x_test = x_test.reshape(ntest,S,S,1,T_in).repeat([1,1,1,T,1])
# pad locations (x,y,t)
gridx = torch.tensor(np.linspace(0, 1, S), dtype=torch.float)
gridx = gridx.reshape(1, S, 1, 1, 1).repeat([1, 1, S, T, 1])
gridy = torch.tensor(np.linspace(0, 1, S), dtype=torch.float)
gridy = gridy.reshape(1, 1, S, 1, 1).repeat([1, S, 1, T, 1])
gridt = torch.tensor(np.linspace(0, 1, T+1)[1:], dtype=torch.float)
gridt = gridt.reshape(1, 1, 1, T, 1).repeat([1, S, S, 1, 1])
x_train = torch.cat((gridx.repeat([ntrain,1,1,1,1]), gridy.repeat([ntrain,1,1,1,1]),
gridt.repeat([ntrain,1,1,1,1]), x_train), dim=-1)
x_test = torch.cat((gridx.repeat([ntest,1,1,1,1]), gridy.repeat([ntest,1,1,1,1]),
gridt.repeat([ntest,1,1,1,1]), x_test), dim=-1)
batch_size = args.batch
train_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_train, y_train), batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_test, test_u), batch_size=batch_size, shuffle=False)
ich = 13
initializer = get_initializer('xavier_normal') # xavier_normal, kaiming_normal, kaiming_uniform
torch.manual_seed(0)
np.random.seed(0)
alpha = 12
c = 4
k = 3
nCZ = 4
L = 0
model = MWT(ich,
alpha = alpha,
c = c,
k = k,
base = 'legendre', # chebyshev
nCZ = nCZ,
L = L,
initializer = initializer,
).to(device)
learning_rate = args.lr
epochs = args.epochs
step_size = args.step_size
gamma = args.gamma
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=args.wd)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
myloss = LpLoss(size_average=False)
y_normalizer.cuda()
train_loss = []
test_loss = []
for epoch in tqdm(range(1, epochs+1)):
train_l2 = train(model, train_loader, optimizer, epoch, device,
lossFn = myloss, lr_schedule = scheduler,
post_proc = y_normalizer.decode)
test_l2 = test(model, test_loader, device, lossFn=myloss, post_proc=y_normalizer.decode)
print(f'epoch: {epoch}, train l2 = {train_l2}, test l2 = {test_l2}')
train_loss.append(train_l2)
test_loss.append(test_l2)
torch.save({'train_rel':train_loss, 'test_rel':test_loss}, 'results/loss_{}.bin'.format(args.name))
torch.save(model.state_dict(), 'results/weight_{}.bin'.format(args.name))
| 2 | 2 |
scripts/papers/AAAI17/BikeNYC/predict.py | angeliababy/predict_Resnet | 2 | 12777755 |
from keras.models import load_model
# from matplotlib.font_manager import FontProperties
import cv2
import numpy as np
import exptBikeNYC
size =10
model = exptBikeNYC.build_model(False)
model.load_weights('MODEL/c3.p3.t3.resunit4.lr0.0002.best.h5')
f = open("area.csv", "r")
# temporary store for the person counts at one time step
person_num = []
# stores the person counts for each time, shape (n, 3, 3)
imgs = []
i, l = 0, 0
for line in f:
l += 1
if l == 1:
continue
i += 1
line = line.strip().split(',')
# scale the counts to values below 1; convert back later to recover the actual counts
number = (float(line[2]) - 0) / (3073 - 0) * 2 - 1
person_num.append(number)
# read 16 records at a time
if i % (128) == 0:
# convert to a one-dimensional array
person_num = np.array(person_num)
# reshape into an image-like form
person_num = person_num.reshape(16, 8)
imgs.append(person_num)
i = 0
person_num = []
# training data (three types of inputs, each converted to multi-channel form)
train_x1, train_x2, train_x3, train_y = [], [], [], []
for i in range(1300, 1305):
# take the closeness, period and trend component data, each a sequence of a different length
image1 = [imgs[i - 3], imgs[i - 2], imgs[i - 1]]
image2 = [imgs[i - 72], imgs[i - 48], imgs[i - 24]]
image3 = [imgs[i - 484], imgs[i - 336], imgs[i - 168]]
train_x1.append(image1)
train_x2.append(image2)
train_x3.append(image3)
lab = [imgs[i]]
train_y.append(lab) # final output
train_x = [np.array(train_x1), np.array(train_x2), np.array(train_x3)]
train_y = np.array(train_y)
# X_test, Y_test=exptBikeNYC.main()
predict_y = model.predict(train_x)
print((train_y+1)/2*3073)
print(((predict_y+1)/2*3073).astype(int))
# print((predict_y*(60923+192687)-192687))
| 2.421875 | 2 |
BitTorrent-5.2.2/BitTorrent/__init__.py | jpabb7/p2pScrapper | 4 | 12777756 |
<reponame>jpabb7/p2pScrapper<gh_stars>1-10
# -*- coding: UTF-8 -*-
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
version = '5.2.2'
URL = 'http://www.bittorrent.com/'
DONATE_URL = URL + 'donate.html?client=%(client)s'
FAQ_URL = URL + 'FAQ.html?client=%(client)s'
SEARCH_URL = 'http://www.bittorrent.com/search_result.html?client=%(client)s&search=%(search)s'
#LOCALE_URL = URL + 'translations/'
# Moved to BTL. Needed for get_language <- BTL.write_language_file
#LOCALE_URL = 'http://translations.bittorrent.com/'
NAG_FREQUENCY = 3
PORT_RANGE = 5
import sys
assert sys.version_info >= (2, 3, 0), "Python %s or newer required" % '2.3.0'
import os
import logging
import logging.handlers
from StringIO import StringIO
from BTL import BTFailure, InfoHashType
from BTL import atexit_threads
# failure due to user error. Should output differently (e.g., not outputting
# a backtrace).
class UserFailure(BTFailure):
pass
branch = None
p = os.path.realpath(os.path.split(sys.argv[0])[0])
if (os.path.exists(os.path.join(p, '.cdv')) or
os.path.exists(os.path.join(p, '.svn'))):
branch = os.path.split(p)[1]
del p
from BitTorrent.platform import get_temp_subdir, get_dot_dir, is_frozen_exe
if os.name == 'posix':
if os.uname()[0] == "Darwin":
from BTL.translation import _
if "-u" in sys.argv or "--use_factory_defaults" in sys.argv:
logroot = get_temp_subdir()
else:
#logroot = get_home_dir()
logroot = get_dot_dir()
if is_frozen_exe:
if logroot is None:
logroot = os.path.splitdrive(sys.executable)[0]
if logroot[-1] != os.sep:
logroot += os.sep
logname = os.path.split(sys.executable)[1]
else:
logname = os.path.split(os.path.abspath(sys.argv[0]))[1]
logname = os.path.splitext(logname)[0] + '.log'
if logroot != '' and not os.path.exists(logroot):
os.makedirs(logroot)
logpath = os.path.join(logroot, logname)
# because I'm generous.
STDERR = logging.CRITICAL + 10
logging.addLevelName(STDERR, 'STDERR')
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
# set a format which is simpler for console use
#formatter = logging.Formatter(u'%(name)-12s: %(levelname)-8s %(message)s')
formatter = logging.Formatter(u'%(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
bt_log_fmt = logging.Formatter(u'[' + unicode(version) + u' %(asctime)s] %(levelname)-8s: %(message)s',
datefmt=u'%Y-%m-%d %H:%M:%S')
stderr_console = None
old_stderr = sys.stderr
def inject_main_logfile():
# the main log file. log every kind of message, format properly,
# rotate the log. someday - SocketHandler
mainlog = logging.handlers.RotatingFileHandler(filename=logpath,
mode='a', maxBytes=2**20, backupCount=1)
mainlog.setFormatter(bt_log_fmt)
mainlog.setLevel(logging.DEBUG)
logger = logging.getLogger('')
logging.getLogger('').addHandler(mainlog)
logging.getLogger('').removeHandler(console)
atexit_threads.register(lambda : logging.getLogger('').removeHandler(mainlog))
global stderr_console
if not is_frozen_exe:
# write all stderr messages to stderr (unformatted)
# as well as the main log (formatted)
stderr_console = logging.StreamHandler(old_stderr)
stderr_console.setLevel(STDERR)
stderr_console.setFormatter(logging.Formatter(u'%(message)s'))
logging.getLogger('').addHandler(stderr_console)
root_logger = logging.getLogger('')
class StderrProxy(StringIO):
# whew. ugly. is there a simpler way to write this?
# the goal is to stop every '\n' and flush to the log
# otherwise keep buffering.
def write(self, text, *args):
lines = text.split('\n')
for t in lines[:-1]:
if len(t) > 0:
StringIO.write(self, t)
try:
# the docs don't say it, but logging.log is new in 2.4
#logging.log(STDERR, self.getvalue())
root_logger.log(STDERR, self.getvalue())
except:
# logging failed. throwing a traceback would recurse
pass
self.truncate(0)
if len(lines[-1]) > 0:
StringIO.write(self, lines[-1])
sys.stderr = StderrProxy()
def reset_stderr():
sys.stderr = old_stderr
atexit_threads.register(reset_stderr)
| 1.921875 | 2 |
15.py | christi-john/hackerrank-python | 0 | 12777757 |
# https://www.hackerrank.com/challenges/capitalize/problem
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the solve function below.
def solve(s):
ans=""
for i in range(len(s)):
if(i==0 or s[i-1]==" "): ans=ans+s[i].upper()
else: ans=ans+s[i]
return ans
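# For example, solve("hello world  again") is expected to return "Hello World  Again":
# only the first character and any character that follows a space are upper-cased,
# and every other character is kept as-is.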
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
s = input()
result = solve(s)
fptr.write(result + '\n')
fptr.close()
| 3.8125 | 4 |
article/migrations/0003_auto_20190214_1458.py | dolikemc/hostthewaytry | 0 | 12777758 |
# Generated by Django 2.1.5 on 2019-02-14 13:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('places', '0037_hostel'),
('article', '0002_textarticle_place'),
]
operations = [
migrations.RenameField(
model_name='imagearticle',
old_name='user',
new_name='created_by',
),
migrations.RenameField(
model_name='textarticle',
old_name='user',
new_name='created_by',
),
migrations.AddField(
model_name='imagearticle',
name='place',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='places.Place'),
preserve_default=False,
),
migrations.AlterField(
model_name='imagearticle',
name='picture',
field=models.ImageField(help_text='Picture of your place', upload_to='%Y/%m/%d/'),
),
]
| 1.695313 | 2 |
built-in/TensorFlow/Benchmark/nlp/Nezha-large_for_TensorFlow/utils/utils.py | Ascend/modelzoo | 12 | 12777759 |
<reponame>Ascend/modelzoo
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import time
# report latency and throughput during eval
class LogEvalRunHook(tf.train.SessionRunHook):
def __init__(self, global_batch_size, hvd_rank=-1):
self.global_batch_size = global_batch_size
self.hvd_rank = hvd_rank
self.total_time = 0.0
self.count = 0
self.skipped = 0
self.time_list = []
def before_run(self, run_context):
self.t0 = time.time()
def after_run(self, run_context, run_values):
elapsed_secs = time.time() - self.t0
self.count += 1
# Removing first 2 (arbitrary) number of startup iterations from perf evaluations
if self.count <= 2:
print("Skipping time record for ", self.count, " due to overhead")
self.skipped += 1
else:
self.time_list.append(elapsed_secs)
self.total_time += elapsed_secs
# report throughput during training
class LogTrainRunHook(tf.train.SessionRunHook):
def __init__(self, global_batch_size, hvd_rank=-1, save_checkpoints_steps=1000):
self.global_batch_size = global_batch_size
self.hvd_rank = hvd_rank
self.save_checkpoints_steps = save_checkpoints_steps
self.total_time = 0.0
self.count = 0 # Holds number of iterations, including skipped iterations for fp16 loss scaling
def after_create_session(self, session, coord):
self.init_global_step = session.run(tf.train.get_global_step())
def before_run(self, run_context):
self.t0 = time.time()
return tf.train.SessionRunArgs(
fetches=['step_update:0'])
def after_run(self, run_context, run_values):
elapsed_secs = time.time() - self.t0
self.global_step = run_values.results[0]
self.count += 1
# Removing first step + first two steps after every checkpoint save
if (self.global_step - self.init_global_step) % self.save_checkpoints_steps <= 1:
print("Skipping time record for ", self.global_step, " due to checkpoint-saving/warmup overhead")
else:
self.total_time += elapsed_secs
def end(self, session):
num_global_steps = self.global_step - self.init_global_step
self.skipped = (num_global_steps // self.save_checkpoints_steps) * 2 + \
min(2, num_global_steps % self.save_checkpoints_steps) - 1
| 2.109375 | 2 |
source/registrator/start_registrator.py | raldenprog/electronic_vote | 0 | 12777760 |
<filename>source/registrator/start_registrator.py
from datetime import datetime
from base64 import b64decode, b64encode
from flask import Flask
from flask import request, jsonify
from registrator.config import Config
from Crypto.PublicKey import RSA
from registrator.helper import auth, save_public_key_user, public_key_by_id
from Crypto.Signature import pkcs1_15
from Crypto.Hash import SHA256
app = Flask(__name__)
app.config.from_object(Config)
PRIVATE_KEY = RSA.generate(2048)
PUBLIC_KEY = PRIVATE_KEY.publickey()
def sign(encrypted_2_message, private):
hash_encrypted_2_message = SHA256.new(encrypted_2_message)
signature = pkcs1_15.new(private).sign(hash_encrypted_2_message)
return signature
def check_sign(encrypted_2_message, public_key, sign):
"""Проверка подписи от пользоваетеля регистратором"""
hash_encrypted_message = SHA256.new(encrypted_2_message)
try:
pkcs1_15.new(public_key).verify(hash_encrypted_message, sign)
except:
return False
return True
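# A minimal round-trip sketch for the two helpers above (the key and message
# names are illustrative; PyCryptodome is already imported at the top of this file):
#   demo_key = RSA.generate(2048)
#   demo_sig = sign(b'ballot-bytes', demo_key)
#   check_sign(b'ballot-bytes', demo_key.publickey(), demo_sig)    # -> True
#   check_sign(b'tampered-bytes', demo_key.publickey(), demo_sig)  # -> False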
@app.route('/public/<int:id_user>')
def get_public_by_id(id_user):
return public_key_by_id(id_user)[0]
@app.route('/public')
def get_public():
return PUBLIC_KEY.export_key()
@app.route('/auth', methods=['POST'])
def auth_route():
data = request.json
result = auth(data.get('username'), data.get('password'))
if result:
save_public_key_user(result.get('id'), data.get('public_key'))
return jsonify(dict(result)), 200
else:
return {'error_message': 'Неверный логин или пароль'}, 401
@app.route('/status')
def status():
return jsonify({
'start': Config.START_VOTING.strftime('%d.%m.%y %H:%M:%S'),
'accepting': Config.START_ACCEPTING_VOTE.strftime('%d.%m.%y %H:%M:%S'),
'stop_voting': Config.STOP_VOTING.strftime('%d.%m.%y %H:%M:%S'),
}), 200
@app.route('/vote', methods=['POST'])
def vote():
date_time_now = datetime.now()
if Config.START_VOTING > date_time_now:
return {'error_message': 'Голосование еще не начато'}, 200
elif Config.START_ACCEPTING_VOTE > date_time_now > Config.START_VOTING:
data = request.json
sign_user = b64decode(data.get('sign').encode())
message_user = b64decode(data.get('encrypted_message').encode())
public_key_user = public_key_by_id(data.get('id'))
if public_key_user:
checked = check_sign(message_user, RSA.importKey(public_key_user[0]), sign_user)
else:
return {'error_message': 'Участник не подтвердил возможность голосовать'}, 403
if checked:
sign_registrator = sign(message_user, PRIVATE_KEY)
return jsonify({
'sign': b64encode(sign_registrator).decode()
}), 200
else:
return {'error_message': 'Бюллетень не подписан участником'}, 403
elif Config.STOP_VOTING > date_time_now > Config.START_ACCEPTING_VOTE:
return {'error_message': 'Голосование завершено, идет подтверждение голосов'}, 403
elif date_time_now > Config.STOP_VOTING:
return {'error_message': 'Голосование завершено'}, 200
else:
return {'error_message': 'Внутренняя ошибка сервера'}, 500
if __name__ == "__main__":
app.run(host=app.config.get('HOST'), port=app.config.get('PORT'))
| 2.5 | 2 |
HydrogenCpx/xenolithRims.py | EFerriss/Hydrogen_Cpx | 0 | 12777761 |
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 03 15:14:02 2015
@author: Ferriss
Figure roughly following Padron-Navarta et al. 2014 Figure 8
Comparing olivine and cpx rim formation over time
"""
import pynams.diffusion as diff
import numpy as np
import matplotlib.pyplot as plt
import json
################### User input variables #####################################
savefolder = '../../../CpxPaper/figures/'
celsius = 1100.
lengths_microns = [1000.] * 3
cpx_log10D_m2s = [-12.] * 3
mid_log10D_m2s = [-11.] * 3
#oli_log10D_m2s = np.log10(np.array([3.1E-10, 1.8E-11, 1.0E-11])) # KM98 1100C fast mechanism
oli_log10D_m2s = [-10.] * 3
#time_hours = [0.005, 0.007, 0.01, 0.02, 0.03, 0.04, 0.05]
time_hours = [0.005, 0.01, 0.03, 0.05, 0.1, 0.175, 0.25, 0.375, 0.5, 0.75, 1.]
#time_hours = [0.005, 0.05, 0.5, 1., 4., 8.]
#time_hours = [0.05, 0.5, 1., 2., 4., 8., 10., 15., 20.]
#time_hours = [0.5]
water_fraction = 0.9 # How much does concentration drop to qualify as a rim
resolution_microns = 100.
direction = 0 # 0=[100]*, 1=[010], 2=[001]
points_in_calc = 50
################## Set up and run calculation ################################
v_sat = np.sum(np.ones([points_in_calc, points_in_calc, points_in_calc]))
volume_mm3 = lengths_microns[0] * lengths_microns[1] * lengths_microns[2] / 1E9
center_microns = lengths_microns[direction] / 2.
cpx_percent_water_remaining = np.zeros_like(time_hours)
cpx_rim_location_microns = np.zeros_like(time_hours)
mid_percent_water_remaining = np.zeros_like(time_hours)
mid_rim_location_microns = np.zeros_like(time_hours)
oli_percent_water_remaining = np.zeros_like(time_hours)
oli_rim_location_microns = np.zeros_like(time_hours)
idx = 0
for hours in time_hours:
time_seconds = hours * 3600.
cpx_v, cpx_x, cpx_y = diff.diffusion3Dnpi(lengths_microns, cpx_log10D_m2s, time_seconds,
plot3=False, points=points_in_calc)
cpx_percent_water_remaining[idx] = 100. * (v_sat - np.sum(cpx_v)) / v_sat
cpx_idx_rim = (np.abs(cpx_y[direction][0:points_in_calc//2] - water_fraction)).argmin()
cpx_rim_location_microns[idx] = cpx_x[direction][cpx_idx_rim]
mid_v, mid_x, mid_y = diff.diffusion3Dnpi(lengths_microns, mid_log10D_m2s, time_seconds,
plot3=False, points=points_in_calc)
mid_percent_water_remaining[idx] = 100. * (v_sat - np.sum(mid_v)) / v_sat
mid_idx_rim = (np.abs(mid_y[direction][0:points_in_calc//2] - water_fraction)).argmin()
mid_rim_location_microns[idx] = mid_x[direction][mid_idx_rim]
oli_v, oli_x, oli_y = diff.diffusion3Dnpi(lengths_microns, oli_log10D_m2s, time_seconds,
plot3=False, points=points_in_calc)
oli_percent_water_remaining[idx] = 100. * (v_sat - np.sum(oli_v)) / v_sat
oli_idx_rim = (np.abs(oli_y[direction][0:points_in_calc//2] - water_fraction)).argmin()
oli_rim_location_microns[idx] = oli_x[direction][oli_idx_rim]
# print out cpx info
string = ' '.join(('hours:',
'{:.2f}'.format(hours),
'rim observed at',
'{:.1f}'.format(cpx_rim_location_microns[idx]),
'microns;',
'{:.1f}'.format(cpx_percent_water_remaining[idx]),
'% H lost'))
print(string)
idx = idx + 1
#plot final outcome - olivine
#f, ax, v, x, y = diff.diffusion3Dnpi(lengths_microns, oli_log10D_m2s, time_seconds,
# plot3=True, points=points_in_calc)
#### Save data to file
a = [time_hours, list(cpx_rim_location_microns), list(mid_rim_location_microns),
list(oli_rim_location_microns)]
workfile = ''.join((savefolder, '-xenolithRims.txt'))
with open(workfile, 'w') as diff_file:
diff_file.write(json.dumps(a))
#%%############## Plot #########################
workfile = ''.join((savefolder, '-xenolithRims.txt'))
with open(workfile, 'r') as rimfile:
a = rimfile.read()
data = json.loads(a)
time_hours = data[0]
cpx_rim_location_microns = data[1]
mid_rim_location_microns = data[2]
oli_rim_location_microns = data[3]
fig = plt.figure()
fig.set_size_inches(6, 5)
ax = fig.add_subplot(111)
plt.style.use('paper')
time = np.array(time_hours) * 60.
ax.set_xlabel('Time (minutes)')
ax.plot(time, cpx_rim_location_microns, '+', markersize=10, mew=3,
label=''.join(('logD=', '{:.1f}'.format(cpx_log10D_m2s[direction]),
' m$^2$/s\n~min for Fe-rich cpx')))
ax.plot(time, mid_rim_location_microns, 'x', markersize=8, mew=2,
label=''.join(('logD=', '{:.1f}'.format(mid_log10D_m2s[direction]),
' m$^2$/s\n~Fe-rich cpx')))
ax.plot(time, oli_rim_location_microns, 'o', markersize=10, mew=1,
label=''.join(('logD=', '{:.1f}'.format(oli_log10D_m2s[direction]),
' m$^2$/s\n~olivine proton-polaron mech.')))
ylab = ''.join(('Distance from rim in center of cube face where\nhydrogen concentration <',
'{:.0f}'.format(water_fraction*100), '% of initial ($\mu$m)'))
tit_volume = ''.join(('{:.1f}'.format(volume_mm3), ' mm$^3$ cube'))
tit_temp = ''.join((' ', '{:.0f}'.format(celsius), ' $\degree$C'))
tit = ', '.join((tit_volume, tit_temp))
ax.set_ylabel(ylab)
ax.set_title(tit)
ax.set_ylim(center_microns, 0)
plt.axhspan(0, resolution_microns, facecolor='red', alpha=0.2)
ax.text(ax.get_xlim()[1]/2., resolution_microns/2.,
'Diffusion profiles unlikely to be observed', ha='center', va='center')
plt.axhspan(center_microns, center_microns - 0.05*center_microns,
facecolor='yellow', alpha=0.5)
ax.text(ax.get_xlim()[0], center_microns - 0.07*center_microns,
' loss at the center', ha='left', va='center')
ax.legend(loc=4)
fig.savefig(''.join((savefolder, 'xenolithRims.png')), dpi=200)
| 2.140625 | 2 |
tests/java/org/python/indexer/data/mod2.py | jeff5/jython-whinchat | 577 | 12777762 |
<reponame>jeff5/jython-whinchat<filename>tests/java/org/python/indexer/data/mod2.py
import distutils.command
def mod2test():
return dir(distutils)
| 1.5625 | 2 |
wfile.py | acse-2020/uav | 0 | 12777763 |
<reponame>acse-2020/uav
import string
import random
letters = string.ascii_letters
with open('postal_data.txt', 'w') as f:
for i in range(60):
line = ["".join([random.choice(letters)for j in range(7)]), str(random.uniform(0,
100)), str(random.uniform(0, 100)), str(random.uniform(0, 5))]
for e in line:
f.write(e + ", ")
f.write("\n")
| 2.78125 | 3 |
mnist_objective_func.py | miroblog/AI2017SpringMiniProject | 0 | 12777764 |
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
from datetime import datetime
LOGDIR = '/tmp/17springAI/mnist/objectiveFunc/' + datetime.now().strftime('%Y%m%d-%H%M%S') + '/'
def activation(act_func, logit):
if act_func == "relu":
return tf.nn.relu(logit)
else:
return tf.nn.sigmoid(logit)
def logits(input, size_in, size_out):
w = tf.Variable(tf.truncated_normal([size_in, size_out], stddev=0.1), name="W")
b = tf.Variable(tf.constant(0.1, shape=[size_out]), name="B")
logit = (tf.matmul(input, w) + b)
tf.summary.histogram("weights", w)
tf.summary.histogram("biases", b)
tf.summary.histogram("logits", logit)
return logit, w, b
# fully conected layer
def fc_layer(input, size_in, size_out,act_func, name="fc" ):
with tf.name_scope(name):
logit, w, b = logits(input, size_in, size_out)
act = activation(act_func, logit)
tf.summary.histogram("weights", w)
tf.summary.histogram("biases", b)
tf.summary.histogram("activations", act)
return act, w, b
# runs different model each time, hparam is a string specification for the model
# hpram is also used in the created tensorboard summary
def mnist_model(learning_rate, objectiveFunc, hparam, act_func):
tf.reset_default_graph()
sess = tf.InteractiveSession(config=tf.ConfigProto(gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.4)))
# input layer
x = tf.placeholder(tf.float32, [None, 784], name="x")
x_image = tf.reshape(x, [-1, 28, 28, 1])
# to view images on tensorboard
tf.summary.image('input', x_image, 3)
# label to compare
y_ = tf.placeholder(tf.float32, [None, 10], name="labels")
keep_prob = tf.placeholder(tf.float32)
h1, W1, B1 = fc_layer(x, 784, 100, act_func, "h1")
logit, W2, B2 = logits(h1, 100, 10)
Y = tf.nn.softmax(logit)
## changing loss function
if objectiveFunc == "mean_sq_err":
with tf.name_scope("mean_sq_err"):
mean_sq_err = tf.reduce_mean(tf.contrib.keras.losses.mean_squared_error(Y, y_))
tf.summary.scalar("mean_sq_err", mean_sq_err)
loss = mean_sq_err
elif objectiveFunc == "L2_norm":
with tf.name_scope("L2_norm"):
xent = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(
logits=logit, labels=y_), name="xent")
L2_lambda = 0.05
L2_norm = xent + \
L2_lambda * (tf.nn.l2_loss(W1) + tf.nn.l2_loss(B1) + tf.nn.l2_loss(W2) + tf.nn.l2_loss(B2))
tf.summary.scalar("L2_norm", L2_norm)
loss = L2_norm
else:
with tf.name_scope("xent"):
xent = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(
logits=logit, labels=y_), name="xent")
tf.summary.scalar("xent", xent)
loss = xent
with tf.name_scope("train"):
train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)
with tf.name_scope("accuracy"):
correct_prediction = tf.equal(tf.argmax(logit, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar("accuracy", accuracy)
summ = tf.summary.merge_all()
sess.run(tf.global_variables_initializer())
writer_train = tf.summary.FileWriter(LOGDIR + hparam + "_train")
writer_train.add_graph(sess.graph)
writer_test = tf.summary.FileWriter(LOGDIR + hparam + "_test")
writer_test.add_graph(sess.graph)
num_epochs = 200
# training accuracy
list_vacc = list()
for k in range(num_epochs):
print(str(k) + "th epoch")
for i in range(550):
if i % 100 == 0:
batch_xs, batch_ys = mnist.train.next_batch(100)
[train_accuracy, s_train] = sess.run([accuracy, summ],
feed_dict={x: batch_xs, y_: batch_ys})
writer_train.add_summary(s_train, k * 550 + i)
[test_accuracy, s_test] = sess.run([accuracy, summ],
feed_dict={x: mnist.test.images, y_: mnist.test.labels})
writer_test.add_summary(s_test, k * 550 + i)
print('Step {:d}, training accuracy {:g}'.format(k * 550 + i, train_accuracy))
print('Step {:d}, test accuracy {:g}'.format(k * 550 + i, test_accuracy))
sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 0.5})
vacc = accuracy.eval(feed_dict={x: mnist.validation.images, y_: mnist.validation.labels, keep_prob: 1})
list_vacc.append(vacc)
if k > 10 and np.mean(list_vacc[-10:-5]) > np.mean(list_vacc[-5:]):
print("Seems like it starts to overfit, aborting the training")
break
mnist = input_data.read_data_sets('./MNIST_data', one_hot=True)
def make_hparam_string(act_func, learning_rate, objective):
return "%s,lr_%.0E,%s" % (act_func, learning_rate, objective)
def main():
for act_func in ["sigmoid", "relu"]:
# You can try adding some more learning rates
for learning_rate in [1E-4]:
# Include "False" as a value to try different model architectures:
for objective in ["xent", "mean_sq_err", "L2_norm"]:
# def mnist_model(learning_rate, regularization, hparam):
hparam = make_hparam_string(act_func, learning_rate, objective)
print('Starting run for %s' % hparam)
# Actually run with the new settings
mnist_model(learning_rate, objective, hparam, act_func)
if __name__ == '__main__':
main()
| 2.96875 | 3 |
surefire/decoders/linear.py | jasonkriss/surefire | 0 | 12777765 |
<filename>surefire/decoders/linear.py
from torch.nn import Linear
from surefire.decoders import Decoder
class LinearDecoder(Decoder):
def __init__(self, *args, **kwargs):
super().__init__()
self._linear = Linear(*args, **kwargs)
def forward(self, x):
return self._linear(x)
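# A minimal usage sketch, assuming the Decoder base class takes no constructor
# arguments (as the bare super().__init__() call above suggests); the sizes are illustrative:
#   decoder = LinearDecoder(128, 10)       # arguments are forwarded to torch.nn.Linear
#   out = decoder(torch.randn(4, 128))     # -> tensor of shape (4, 10)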
| 2.515625 | 3 |
ario/content.py | wish-team/ario | 9 | 12777766 |
<reponame>wish-team/ario
from functools import wraps
from ario.status import moved_temporarily, moved_permanently
import ujson
def json(handler):
@wraps(handler)
def wrapper(request, response, *args):
response.content_type = 'application/json'
response.response_encoding = 'utf-8'
body = handler(request, response, *args)
body = ujson.dumps(body)
body = response.encode_response(body)
return body
return wrapper
def html(handler):
@wraps(handler)
def wrapper(request, response, *args):
response.content_type = 'text/html'
response.response_encoding = 'utf-8'
body = handler(request, response, *args)
body = response.encode_response(body)
return body
return wrapper
def redirect(url, perm=False):
def decorator(handler):
@wraps(handler)
def wrapper(request, response, *args):
ret = None
if not perm:
response.status = moved_temporarily()
ret = b'302 Moved Temporarily'
else:
response.status = moved_permanently()
ret = b'301 Moved Permanently'
response.location = url
response.start()
return ret
return wrapper
return decorator
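# An illustrative sketch of how these decorators might be applied, assuming
# handlers follow the (request, response, *args) convention used above;
# the handler names and return values are hypothetical:
#   @json
#   def get_user(request, response, user_id):
#       return {'id': user_id, 'name': 'example'}
#
#   @redirect('/login')   # 302 Moved Temporarily; pass perm=True for a 301
#   def legacy_login(request, response):
#       pass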
| 2.34375 | 2 |
models/__init__.py | 13952522076/Efficient_ImageNet_Classification | 16 | 12777767 |
<filename>models/__init__.py<gh_stars>10-100
from __future__ import absolute_import
from .resnet import *
from .resnet_se import *
| 1.132813 | 1 |
ws_utils.py | ngannlt/UITws-v1 | 0 | 12777768 |
import copy # for copying something in Python
import unicodedata
import multiprocessing as mp
from multiprocessing import Manager
import gc
import numpy as np
class WSUtils():
def __init__(self, VNDict):
##################################################################################################
'''
The dictionary below is the normalization map, which is inherited from Dat Quoc Nguyen
(https://github.com/datquocnguyen/RDRsegmenter)
(https://github.com/datquocnguyen/VnMarMoT/blob/master/Utility.py)
'''
self.normalize_map = {
"òa": "oà", "óa": "oá", "ỏa": "oả", "õa": "oã",
"ọa": "oạ", "òe": "oè", "óe": "oé", "ỏe": "oẻ",
"õe": "oẽ", "ọe": "oẹ", "ùy": "uỳ", "úy": "uý",
"ủy": "uỷ", "ũy": "uỹ", "ụy": "uỵ", "Ủy": "Uỷ"
}
##################################################################################################
'''
The RDR_VNDict, VNFamilyName, and VNMiddle are inherited from the work of Dat Quoc Nguyen
(https://github.com/datquocnguyen/RDRsegmenter)
The UET_VNDict is inherited from the work of Tuan-Phong Nguyen
(https://github.com/phongnt570/UETsegmenter/blob/master/dictionary/VNDictObject)
'''
##################################################################################################
self.VNDict = set(self.read_lines('./dict/' + VNDict + '.txt'))
self.VNFamilyName = set(self.read_lines('./dict/VNFamilyName.txt'))
self.VNMiddle = set(self.read_lines('./dict/VNMiddle.txt'))
##################################################################################################
self.lower = '🄰' # this string indicates the "lower" token style
self.upper = '🄱' # this string indicates the "upper" token style
self.bos = '🄲' # this string indicates the "begin of sentence" token style
self.eos = '🄳' # this string indicates the "end of sentence" token style
self.allupper = '🄴' # this string indicates the "all characters are upper" token style
self.other = '🄵' # this string indicates the "other" token style (not any of the styles above)
##################################################################################################
def pop_seen_words_sfx(self, train_stns, ratios):
seen_words = set()
seen_words_sfx = set()
for stn in train_stns:
wrds = stn.lower().split(' ')
for w in wrds:
seen_words.update({w})
syl = w.split('_')
sfx = syl[-1]
if len(syl) in [3, 4] and sfx in ratios['sfx'] and not self.inVNFamilyName(syl[0]):
seen_words_sfx.update({w})
seen_words.update(self.VNDict)
for word in seen_words_sfx:
if self.inVNDict(word.replace('_', ' ')):
self.VNDict.discard(word.replace('_', ' '))
return seen_words, seen_words_sfx
def get_unseenwords_sfx(self, test_stns, seen_words, ratios):
unseen_words_sfx = set()
for stn in test_stns:
wrds = stn.lower().split(' ')
for w in wrds:
syl = w.split('_')
sfx = syl[-1]
if len(syl) in [3, 4] and sfx in ratios['sfx'] and not self.inVNFamilyName(syl[0]):
if ' '.join(syl) not in seen_words:
unseen_words_sfx.update({w})
return unseen_words_sfx
def normalize_accent(self, line_pos):
for key in self.normalize_map.keys():
line_pos = line_pos.replace(key, self.normalize_map[key])
return unicodedata.normalize('NFC', u'' + line_pos)
def possible_tag(self, stns_train, POSs):
possible_lbl = {}
for idx, line in enumerate(stns_train):
lbl = POSs[idx]
for j, word in enumerate(line):
word_lower = word.lower()
if word not in possible_lbl:
possible_lbl[word_lower] = [lbl[j]]
else:
if lbl[j] not in possible_lbl[word_lower]:
possible_lbl[word_lower].append(lbl[j])
return possible_lbl
def read_lines(self, file_path):
'''
Input: The path of the text file.
Output: A list in which each element corresponds to a line of the input text file
'''
lines = []
f = open(file_path, 'r', encoding='utf8')
for line in f.readlines():
line_pos = line.replace('\n', '')
lines.append(self.normalize_accent(line_pos))
f.close()
return lines
def read_ws_corpus(self, file_path):
'''
Input: The path of the text file.
Output: A list in which each element corresponds to a line of the input text file
'''
lines = []
f = open(file_path, 'r', encoding='utf8')
for line in f.readlines():
line_pos = line.replace('\n', '')
'''
The "underscore" character has the task to concatenate continuous tokens into a work
The "space" character has the task to segment work (to mark the boundary of two words)
"Two lines" of code bellow has the task to fix the little errors in the VLSP 2013 for Word Segmentation dataset
These errors occur only "four times" in the "testing set" of the VLSP 2013 for Word Segmentation dataset
Therefore, that errors will be not affected on all results because of it very very very very very small than total
'''
######################################
line_pos = line_pos.replace('_ ', ' ')
line_pos = line_pos.replace(' _', ' ')
######################################
lines.append(self.normalize_accent(line_pos))
f.close()
return lines
def read_pos_training_corpus(self, file_path):
'''
Input:
Output:
'''
f = open(file_path, 'r', encoding='utf8')
list_stns, list_POSs = [], []
words_tmp, POSs_tmp = [], []
lbls, id2lbl = {}, {}
for line in f.readlines():
if len(line.replace('\n', '')) == 0:
list_stns.append(words_tmp)
list_POSs.append(POSs_tmp)
words_tmp, POSs_tmp = [], []
continue
line_split = line.replace('\n', '').split('\t')
words_tmp.append(self.normalize_accent(line_split[0]))
lbl = self.normalize_accent(line_split[1])
if lbl not in lbls:
lbls[lbl] = len(lbls)
POSs_tmp.append(lbls[lbl])
f.close()
for key in lbls.keys():
id2lbl[lbls[key]] = key
return list_stns, list_POSs, lbls, id2lbl
def read_pos_test_corpus(self, file_path):
'''
Input:
Output:
'''
f = open(file_path, 'r', encoding='utf8')
list_stns, list_POSs = [], []
words_tmp, POSs_tmp = [], []
for line in f.readlines():
if len(line.replace('\n', '')) == 0:
list_stns.append(words_tmp)
list_POSs.append(POSs_tmp)
words_tmp, POSs_tmp = [], []
continue
line_split = line.replace('\n', '').split('\t')
words_tmp.append(self.normalize_accent(line_split[0]))
POSs_tmp.append(self.normalize_accent(line_split[1]))
f.close()
return list_stns, list_POSs
def add_string_to_dict(self, vocab, string):
'''
Put a string to a dictionary and update the counters of keys
'''
if string not in vocab: vocab[string] = 1
else: vocab[string] += 1
def syl_type(self, s):
'''
Return style of an input token by using "utils" object
(All styles are described above)
'''
if s == self.bos:
return self.bos
if s == self.eos:
return self.eos
if s.islower():
return self.lower
if s.isupper():
return self.allupper
if len(s) > 1 and s[0].isupper() and s[1:].islower():
return self.upper
return self.other
def inVNDict(self, syl):
'''
Input: a string
Output: True or False
(Check whether a string in the Vietnamese dictionary)
'''
return syl in self.VNDict
def inVNFamilyName(self, syl):
'''
Input: a string
Output: True or False
(Check whether a string is a Vietnamese family name)
'''
return syl in self.VNFamilyName
def inVNMiddle(self, syl):
'''
Input: a string
Output: True or False
(Check whether a string is a Vietnamese middle name)
'''
return syl in self.VNMiddle
def compute_ratios(self, training_sentences):
# Counters of some n-grams patterns
n_gram_1 = {'e1_gram-1_gram_n0_pre': {}, 'e2_gram-1_gram_n0': {}}
pos_1_gram = {}
head_sfx = {}
# Counting for some n-grams patterns
for line in training_sentences:
n_grams = line.split(' ')
n_grams_N = len(n_grams)
for idx, n_gram in enumerate(n_grams):
tokens = n_gram.lower().split('_')
tokens_original = n_gram.split('_')
if 4 < len(tokens) and len(tokens) < 10:
self.VNDict.update({n_gram.replace('_', ' ').lower()})
if len(tokens) == 1:
if idx < n_grams_N - 1:
next_word = n_grams[idx + 1]
if len(next_word.split('_')) > 1:
if self.syl_type(tokens_original[0]) == self.lower:
self.add_string_to_dict(n_gram_1['e1_gram-1_gram_n0_pre'], tokens[0])
elif self.syl_type(next_word) in [self.lower, self.allupper, self.upper]:
if self.syl_type(tokens_original[0]) == self.lower:
self.add_string_to_dict(n_gram_1['e1_gram-1_gram_n0_pre'], tokens[0])
if len(tokens) > 1:
if self.syl_type(tokens_original[0]) == self.lower:
self.add_string_to_dict(n_gram_1['e2_gram-1_gram_n0'], tokens[0])
if len(tokens) == 3 or len(tokens) == 4:
head = ' '.join(tokens[:-1])
sfx = tokens[-1]
if self.inVNDict(head) and not self.inVNDict(' '.join(tokens)):
if all(self.syl_type(t_original) == self.lower for t_original in tokens_original):
if not self.inVNFamilyName(tokens[0]):
self.add_string_to_dict(pos_1_gram, sfx)
if sfx not in head_sfx:
head_sfx[sfx] = {head}
else:
head_sfx[sfx].update({head})
avg_pos_1_gram = 0
for key in pos_1_gram:
avg_pos_1_gram += pos_1_gram[key]
pos_final = []
if len(pos_1_gram) > 0:
avg_pos_1_gram = avg_pos_1_gram/len(pos_1_gram)
for key in pos_1_gram:
if pos_1_gram[key] > avg_pos_1_gram and len(head_sfx[key]) > 1:
pos_final.append(key)
tmp_ratios_2 = {}
avg_ratios_2 = 0
ratios_2 = {}
for key in n_gram_1['e1_gram-1_gram_n0_pre'].keys():
if key in n_gram_1['e2_gram-1_gram_n0']:
quantity = n_gram_1['e1_gram-1_gram_n0_pre'][key]+n_gram_1['e2_gram-1_gram_n0'][key]
tmp_ratios_2[key] = [n_gram_1['e1_gram-1_gram_n0_pre'][key]>n_gram_1['e2_gram-1_gram_n0'][key],quantity]
avg_ratios_2 += quantity
if len(tmp_ratios_2) > 0:
avg_ratios_2 = avg_ratios_2/len(tmp_ratios_2)
for key in tmp_ratios_2:
if tmp_ratios_2[key][1] > avg_ratios_2:
if tmp_ratios_2[key][0]:
ratios_2[key] = tmp_ratios_2[key][0]
return {'sep': ratios_2, 'sfx': pos_final}
def extract_training_sentence(self, sentence):
'''
Input: a sentence (containing "underscore" characters)
Output: the list of syllables and labels (0 for "space" and 1 for "underscore" after each token)
For instance:
+ Input: bùng_phát việc khai_thác tự_do mỏ sắt Trại_Bò
+ Output:
- syls: ['bùng', 'phát', 'việc', 'khai', 'thác', 'tự', 'do', 'mỏ', 'sắt', 'Trại', 'Bò']
- lbls: [1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0]
Note that the "label" at the end of a sentence is always "space"! (And there is no "label" at the beginning of a sentence)
'''
syls, lbls, cur_idx = [], [], 0
stn = sentence + ' '
for idx, character in enumerate(stn):
if character == ' ' or character == '_':
if character == ' ':
lbls.append(0)
else:
lbls.append(1)
syls.append(stn[cur_idx:idx])
cur_idx = idx + 1
return syls, lbls
def extract_syls_windows(self, syls, lbls, window_size=8):
'''
Input: the list of syllables and labels of a training sentence
Output: all syllable windows (obtained by padding the beginning and end of the sentence and sliding!)
'''
bi_lbl = copy.deepcopy(lbls)
for i in range(window_size):
syls = [self.bos] + syls + [self.eos]
bi_lbl = [0] + bi_lbl + [0]
return [[syls[i-window_size:i+window_size+1], bi_lbl[i-window_size:i+window_size+1]]\
for i in range(window_size, len(syls)-window_size)]
def extract_syls_test_windows(self, syls, window_size=8):
'''
Input: the list of syllables of a testing sentence
Output: all syllable windows (obtained by padding the beginning and end of the sentence and sliding!)
'''
for i in range(window_size):
syls = [self.bos] + syls + [self.eos]
return [syls[i-window_size:i+window_size+1] for i in range(window_size, len(syls)-window_size)]
def get_support(self, true_line, pred_line):
'''
Input:
+ The dictionary of "true" and "predicted" line
+ The format of key is: [the begin position of word]_[the end position of word]
+ The format of value is: string of word
Output:
+ Corrected prediction satisfies: both the predicted and true pair have the same key and value
+ Number of corrected words are segmented, number of words in "predicted" line, and number of word in "true" line
'''
nb_correctly_segmented = 0
for key in pred_line.keys():
if key in true_line and pred_line[key] == true_line[key]:
nb_correctly_segmented += 1
return nb_correctly_segmented, len(pred_line), len(true_line)
def get_support_details(self, true_line, pred_line, unseen_words_sfx):
'''
Input:
+ The dictionary of "true" and "predicted" line
+ The format of key is: [the begin position of word]_[the end position of word]
+ The format of value is: string of word
Output:
+ Corrected prediction satisfies: both the predicted and true pair have the same key and value
+ Number of corrected words are segmented, number of words in "predicted" line, and number of word in "true" line
'''
nb_correctly_segmented = {'1': 0, '2': 0, '3a': 0, '3b': 0, '4a': 0, '4b': 0, '5-9': 0, '10-': 0}
nb_pred = {'1': 0, '2': 0, '3a': 0, '3b': 0, '4a': 0, '4b': 0, '5-9': 0, '10-': 0}
nb_true = {'1': 0, '2': 0, '3a': 0, '3b': 0, '4a': 0, '4b': 0, '5-9': 0, '10-': 0}
for key in pred_line.keys():
nb_syls = len(pred_line[key].split('_'))
if 10 <= nb_syls:
nb_syls = '10-'
elif 5 <= nb_syls and nb_syls <= 9:
nb_syls = '5-9'
elif nb_syls in [3,4]:
if pred_line[key].lower() in unseen_words_sfx:
nb_syls = str(nb_syls) + 'b'
else:
nb_syls = str(nb_syls) + 'a'
else:
nb_syls = str(nb_syls)
nb_pred[nb_syls] += 1
for key in true_line.keys():
nb_syls = len(true_line[key].split('_'))
if 10 <= nb_syls:
nb_syls = '10-'
elif 5 <= nb_syls and nb_syls <= 9:
nb_syls = '5-9'
elif nb_syls in [3,4]:
if true_line[key].lower() in unseen_words_sfx:
nb_syls = str(nb_syls) + 'b'
else:
nb_syls = str(nb_syls) + 'a'
else:
nb_syls = str(nb_syls)
nb_true[nb_syls] += 1
for key in true_line.keys():
nb_syls = len(true_line[key].split('_'))
if 10 <= nb_syls:
nb_syls = '10-'
elif 5 <= nb_syls and nb_syls <= 9:
nb_syls = '5-9'
elif nb_syls in [3,4]:
if true_line[key].lower() in unseen_words_sfx:
nb_syls = str(nb_syls) + 'b'
else:
nb_syls = str(nb_syls) + 'a'
else:
nb_syls = str(nb_syls)
if key in pred_line and pred_line[key] == true_line[key]:
nb_correctly_segmented[nb_syls] += 1
return nb_correctly_segmented, nb_pred, nb_true
def compute_score_details(self, list_stn, list_predict, unseen_words_sfx):
nb_correct, nb_output, nb_ref = 0, 0, 0
nb_correct = {'1': 0, '2': 0, '3a': 0, '3b': 0, '4a': 0, '4b': 0, '5-9': 0, '10-': 0}
nb_output = {'1': 0, '2': 0, '3a': 0, '3b': 0, '4a': 0, '4b': 0, '5-9': 0, '10-': 0}
nb_ref = {'1': 0, '2': 0, '3a': 0, '3b': 0, '4a': 0, '4b': 0, '5-9': 0, '10-': 0}
precision = {}
recall = {}
f1_score = {}
for idx, stn in enumerate(list_stn):
pred_sentence = list_predict[idx]
n_c, n_p, n_r = self.get_support_details(self.exact_wordboundary(stn), self.exact_wordboundary(pred_sentence),\
unseen_words_sfx)
for key in nb_correct.keys():
nb_correct[key] += n_c[key]
nb_output[key] += n_p[key]
nb_ref[key] += n_r[key]
for key in nb_correct.keys():
if nb_output[key] > 0:
precision[key] = 100*nb_correct[key]/nb_output[key]
else:
precision[key] = 0
if precision[key] > 0:
recall[key] = 100*nb_correct[key]/nb_ref[key]
else:
recall[key] = 0
if precision[key]+recall[key] > 0:
f1_score[key] = 2*precision[key]*recall[key]/(precision[key]+recall[key])
else:
f1_score[key] = 0
performance_detail = {'precision': precision, 'recall': recall, 'f1_score': f1_score,\
'nb_correct': nb_correct, 'nb_output': nb_output, 'nb_ref': nb_ref}
nb_correct_total = sum(nb_correct.values())
nb_output_total = sum(nb_output.values())
nb_ref_total = sum(nb_ref.values())
precision_total = 100*nb_correct_total/nb_output_total
recall_total = 100*nb_correct_total/nb_ref_total
f1_score_total = 2*precision_total*recall_total/(precision_total+recall_total)
performance_total = {'precision': precision_total, 'recall': recall_total, 'f1_score': f1_score_total}
return performance_detail, performance_total
def exact_wordboundary(self, line):
'''
Input:
+ A sentence contains underscore characters
Output:
+ The dictionary of the input line
+ The format of key is: [the begin position of word]_[the end position of word]
+ The format of value is: string of word
'''
tokens = line.split()
words = {}
idx = 1
for token in tokens:
length = len(token.split('_'))
if length > 1:
words[str(idx) + '-' + str(idx + length - 1)] = token
else:
words[str(idx + length - 1)] = token
idx = idx + length
return words
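# For example, exact_wordboundary('bùng_phát việc') is expected to return
# {'1-2': 'bùng_phát', '3': 'việc'}, where keys are 1-based syllable positions.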
def fill_underscore(self, syls, lbls):
'''
This process is opposite with "extract_training_sentence" function
We fill "underscore" or "space" base on their labels
'''
output = ''
for idx, word in enumerate(syls):
output = output + word
if lbls[idx] == 0:
output = output + ' '
elif lbls[idx] == 1:
output = output + '_'
return output[:-1]
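    # Hypothetical worked example (not from the original source):
    #   fill_underscore(['hoc', 'sinh', 'di', 'hoc'], [1, 0, 0, 0])
    #   -> 'hoc_sinh di hoc'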
def B_I_O_to_underscore_space(self, id2lbl, pos, stn):
two_lbl = []
for idx, lbl in enumerate(pos):
if idx == len(pos) - 1:
two_lbl.append(0)
else:
if id2lbl[lbl] in ['B_W', 'I_W'] and id2lbl[pos[idx + 1]] == 'I_W':
two_lbl.append(1)
else:
two_lbl.append(0)
return self.fill_underscore(stn, two_lbl)
def extract_training_pairs(self, training_sentences):
X, Y = [], []
for sentence in training_sentences:
syls, lbls = self.extract_training_sentence(sentence)
syls_windows = self.extract_syls_windows(syls, lbls)
X.extend(syls_windows)
Y.extend(lbls)
return X, Y
def predict_list_of_sentence_ws(self, ws, NUM_PROCESSES, list_stn, get_support, has_underscore=False):
NUM_JOBS = len(list_stn)
nor_list = [self.normalize_accent(line) for line in list_stn]
predicted_sentences = [None for i in range(NUM_JOBS)]
if has_underscore:
nb_correct, nb_output, nb_ref = 0, 0, 0
nb_stn_correct = 0
raw_list = [line.replace('_',' ') for line in nor_list]
else:
raw_list = nor_list
def work(begin, end, return_dict):
for i in range(begin, end):
sentence = raw_list[i]
syls = sentence.split(' ')
syls_windows = self.extract_syls_test_windows(syls)
y, y_lbl = [], [0, 0, 0, 0, 0, 0, 0, 0]
for j in range(len(syls_windows)):
y_tmp = ws['model'].predict(ws['vectorizer'].transform([[syls_windows[j], y_lbl]]))
y_lbl.extend(y_tmp)
y.extend(y_tmp)
y_lbl = y_lbl[1:]
return_dict[i] = self.fill_underscore(syls, y)
if NUM_PROCESSES > 0:
with Manager() as manager:
return_dict = manager.dict()
processes = []
batch_size = int(NUM_JOBS/NUM_PROCESSES)
for i in range(NUM_PROCESSES):
begin = i*batch_size
end = begin + batch_size
if i == NUM_PROCESSES - 1:
end = NUM_JOBS
processes.append(mp.Process(target=work, args=(begin, end, return_dict)))
for p in processes:
p.daemon = True
p.start()
for p in processes:
p.join()
p.terminate()
for k, v in return_dict.items():
predicted_sentences[k] = v
else:
return_dict = {}
work(0, NUM_JOBS, return_dict)
for k, v in return_dict.items():
predicted_sentences[k] = v
for idx, p_sentence in enumerate(predicted_sentences):
if has_underscore:
n_c, n_p, n_r = self.get_support(self.exact_wordboundary(nor_list[idx]), self.exact_wordboundary(p_sentence))
nb_correct += n_c
nb_output += n_p
nb_ref += n_r
if n_c == n_p and n_c == n_r:
nb_stn_correct += 1
if has_underscore:
precision = nb_correct/nb_output
recall = nb_correct/nb_ref
if precision+recall > 0:
f1_score = 2*precision*recall/(precision+recall)
else:
f1_score = 0
if get_support:
return predicted_sentences, [nb_output, nb_ref, nb_correct]
else:
return predicted_sentences, [precision, recall, f1_score]
return predicted_sentences
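    # Hypothetical usage sketch (not from the original source); `segmenter`, `ws_model`,
    # `ws_vectorizer` and the sample sentence are placeholders:
    #   predicted = segmenter.predict_list_of_sentence_ws(
    #       ws={'model': ws_model, 'vectorizer': ws_vectorizer},
    #       NUM_PROCESSES=0,  # 0 runs the work in the current process
    #       list_stn=['toi di hoc'],
    #       get_support=False)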
| 2.390625
| 2
|
src/rastervision/evaluations/segmentation_evaluation_test.py
|
nholeman/raster-vision
| 0
|
12777769
|
import unittest
import numpy as np
from rastervision.core.class_map import (ClassItem, ClassMap)
from rastervision.evaluations.segmentation_evaluation import (
SegmentationEvaluation)
from rastervision.label_stores.segmentation_raster_file import (
SegmentationInputRasterFile)
from rastervision.label_stores.segmentation_raster_file_test import (
TestingRasterSource)
class TestSegmentationEvaluation(unittest.TestCase):
def test_compute(self):
class_map = ClassMap(
[ClassItem(id=1, name='one'),
ClassItem(id=2, name='two')])
raster_class_map = {'#010101': 1, '#020202': 2}
gt_array = np.ones((5, 5, 3), dtype=np.uint8)
gt_array[0, 0, :] = 0
gt_array[2, 2, :] = 2
gt_raster = TestingRasterSource(data=gt_array)
gt_label_store = SegmentationInputRasterFile(
source=gt_raster, raster_class_map=raster_class_map)
p_array = np.ones((4, 4, 3), dtype=np.uint8)
p_array[1, 1, :] = 0
p_raster = TestingRasterSource(data=p_array)
p_label_store = SegmentationInputRasterFile(
source=p_raster, raster_class_map=raster_class_map)
seval = SegmentationEvaluation()
seval.compute(class_map, gt_label_store, p_label_store)
tp1 = 16 - 3 # 4*4 - 3 true positives for class 1
fp1 = 1 # 1 false positive (2,2) and one don't care at (0,0)
fn1 = 1 # one false negative (1,1)
precision1 = float(tp1) / (tp1 + fp1)
recall1 = float(tp1) / (tp1 + fn1)
tp2 = 0 # 0 true positives for class 2
fn2 = 1 # one false negative (2,2)
precision2 = None # float(tp2) / (tp2 + fp2) where fp2 == 0
recall2 = float(tp2) / (tp2 + fn2)
self.assertAlmostEqual(precision1,
seval.class_to_eval_item[1].precision)
self.assertAlmostEqual(recall1, seval.class_to_eval_item[1].recall)
self.assertEqual(precision2, seval.class_to_eval_item[2].precision)
self.assertAlmostEqual(recall2, seval.class_to_eval_item[2].recall)
if __name__ == "__main__":
unittest.main()
| 2.640625
| 3
|
inference.py
|
haifangong/TNSC-classification-baseline
| 10
|
12777770
|
<reponame>haifangong/TNSC-classification-baseline
import argparse
import os
import torch
from resnest.torch import resnest50
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.models.resnet import resnet34, resnet18, resnet50, resnet101
from tqdm import tqdm
from dataloaders import custom_transforms as trforms
from dataloaders import tnsc_dataset
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('-gpu', type=str, default='0')
# Model setting
parser.add_argument('-backbone', type=str, default='resnest50')
parser.add_argument('-input_size', type=int, default=224)
parser.add_argument('-model_path', type=str, default='path2model')
# Training setting
parser.add_argument('-seed', type=int, default=1234)
parser.add_argument('-batch_size', type=int, default=16)
parser.add_argument('-nepochs', type=int, default=100)
parser.add_argument('-resume_epoch', type=int, default=0)
parser.add_argument('-train_fold', type=str, default='CROP')
parser.add_argument('-run_id', type=int, default=-1)
# Optimizer setting
parser.add_argument('-lr', type=float, default=1e-2)
parser.add_argument('-weight_decay', type=float, default=5e-4)
parser.add_argument('-momentum', type=float, default=0.9)
parser.add_argument('-update_lr_every', type=int, default=20)
parser.add_argument('-save_every', type=int, default=2)
parser.add_argument('-log_every', type=int, default=100)
return parser.parse_args()
def main(args):
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
    if args.backbone == 'resnet18':
        backbone = resnet18(num_classes=2, pretrained=False)
    elif args.backbone == 'resnet34':
        backbone = resnet34(num_classes=2, pretrained=False)
    elif args.backbone == 'resnet50':
        backbone = resnet50(num_classes=2, pretrained=False)
    elif args.backbone == 'resnet101':
        backbone = resnet101(num_classes=2, pretrained=False)
    elif args.backbone == 'resnest50':
        backbone = resnest50(num_classes=2, pretrained=False)
else:
raise NotImplementedError
backbone.load_state_dict(torch.load(args.model_path, map_location=lambda storage, loc: storage))
torch.cuda.set_device(device=0)
backbone.cuda()
composed_transforms_ts = transforms.Compose([
trforms.FixedResizeI(size=(args.input_size, args.input_size)),
trforms.NormalizeI(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
trforms.ToTensorI()])
testset = tnsc_dataset.TNSCDataset(mode='test', transform=composed_transforms_ts, return_size=False)
testloader = DataLoader(testset, batch_size=1, shuffle=False, num_workers=2)
backbone.eval()
f = open('classification.csv', 'w')
f.write('ID,CATE\n')
for sample_batched in tqdm(testloader):
img = sample_batched['image'].cuda()
feats = backbone.forward(img)
pred = torch.argmax(feats, dim=1, keepdim=False)
f.write(sample_batched['label_name'][0]+','+str(pred.item())+'\n')
if __name__ == '__main__':
args = get_arguments()
main(args)
| 1.992188
| 2
|
src/csv_download/__init__.py
|
Godan/csv_download
| 0
|
12777771
|
from importlib.metadata import version
try:
__version__ = version(__name__)
except:
pass
| 1.210938
| 1
|
py/examples/plot_form.py
|
dethnass/wave
| 1
|
12777772
|
# Plot / Form
# Display a plot inside a form.
# ---
from synth import FakeCategoricalSeries
from h2o_wave import site, data, ui
page = site['/demo']
n = 20
f = FakeCategoricalSeries()
v = page.add('example', ui.form_card(
box='1 1 4 5',
items=[
ui.text_xl('Example 1'),
ui.visualization(
plot=ui.plot([ui.mark(type='interval', x='=product', y='=price', y_min=0)]),
data=data(fields='product price', rows=[(c, x) for c, x, _ in [f.next() for _ in range(n)]], pack=True),
),
ui.text_xl('Example 2'),
ui.visualization(
plot=ui.plot([ui.mark(type='interval', x='=product', y='=price', y_min=0)]),
data=data(fields='product price', rows=[(c, x) for c, x, _ in [f.next() for _ in range(n)]], pack=True),
),
],
))
page.save()
| 2.75
| 3
|
certification/cert_many_fx_spot.py
|
bidfx/bidfx-api-py
| 3
|
12777773
|
<filename>certification/cert_many_fx_spot.py
#!/usr/bin/env python
import logging
from bidfx import Session, Subject, Field
"""
Example for API Certification with FX spots.
"""
def on_price_event(event):
subject = event.subject
price_map = event.price
if price_map:
print(
"LEVEL 1 {} {} {} {} {} -> {} / {}".format(
subject[Subject.LIQUIDITY_PROVIDER],
subject[Subject.DEAL_TYPE],
subject[Subject.TENOR],
subject[Subject.CURRENCY],
subject[Subject.QUANTITY],
price_map.get(Field.BID, ""),
price_map.get(Field.ASK, ""),
)
)
def on_subscription_event(event):
print(f"Subscription to {event.subject} is {event.status}")
def on_provider_event(event):
print(f"Provider {event.provider} is {event.status}")
def main():
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(levelname)-7s %(threadName)-12s %(message)s",
)
session = Session.create_from_ini_file()
pricing = session.pricing
pricing.callbacks.price_event_fn = on_price_event
pricing.callbacks.subscription_event_fn = on_subscription_event
pricing.callbacks.provider_event_fn = on_provider_event
session.pricing.start()
# Subscribe to next Level 1 subject
for lp in ["RBCFX", "SSFX", "MSFX"]:
ccy_pair = "EURUSD"
ccy = "EUR"
session.pricing.subscribe(
session.pricing.build.fx.stream.spot.liquidity_provider(lp)
.currency_pair(ccy_pair)
.currency(ccy)
.quantity(1000000)
.create_subject()
)
session.pricing.subscribe(
session.pricing.build.fx.stream.spot.liquidity_provider(lp)
.currency_pair(ccy_pair)
.currency(ccy)
.quantity(5000000)
.create_subject()
)
session.pricing.subscribe(
session.pricing.build.fx.stream.spot.liquidity_provider(lp)
.currency_pair(ccy_pair)
.currency(ccy)
.quantity(20000000)
.create_subject()
)
session.pricing.subscribe(
session.pricing.build.fx.stream.spot.liquidity_provider(lp)
.currency_pair(ccy_pair)
.currency(ccy)
.quantity(100000000)
.create_subject()
)
session.pricing.subscribe(
session.pricing.build.fx.stream.spot.liquidity_provider(lp)
.currency_pair(ccy_pair)
.currency(ccy)
.quantity(1234567.80)
.create_subject()
)
session.pricing.subscribe(
session.pricing.build.fx.stream.spot.liquidity_provider(lp)
.currency_pair(ccy_pair)
.currency(ccy)
.quantity(1234567.89)
.create_subject()
)
ccy = "USD"
session.pricing.subscribe(
session.pricing.build.fx.stream.spot.liquidity_provider(lp)
.currency_pair(ccy_pair)
.currency(ccy)
.quantity(1000000)
.create_subject()
)
session.pricing.subscribe(
session.pricing.build.fx.stream.spot.liquidity_provider(lp)
.currency_pair(ccy_pair)
.currency(ccy)
.quantity(1234567.89)
.create_subject()
)
ccy_pair = "USDJPY"
ccy = "USD"
session.pricing.subscribe(
session.pricing.build.fx.stream.spot.liquidity_provider(lp)
.currency_pair(ccy_pair)
.currency(ccy)
.quantity(1000000)
.create_subject()
)
session.pricing.subscribe(
session.pricing.build.fx.stream.spot.liquidity_provider(lp)
.currency_pair(ccy_pair)
.currency(ccy)
.quantity(100000000)
.create_subject()
)
ccy = "JPY"
session.pricing.subscribe(
session.pricing.build.fx.stream.spot.liquidity_provider(lp)
.currency_pair(ccy_pair)
.currency(ccy)
.quantity(100000000)
.create_subject()
)
session.pricing.subscribe(
session.pricing.build.fx.stream.spot.liquidity_provider(lp)
.currency_pair(ccy_pair)
.currency(ccy)
.quantity(1000000000)
.create_subject()
)
session.pricing.subscribe(
session.pricing.build.fx.stream.spot.liquidity_provider(lp)
.currency_pair(ccy_pair)
.currency(ccy)
.quantity(10000000000)
.create_subject()
)
session.pricing.subscribe(
session.pricing.build.fx.stream.spot.liquidity_provider(lp)
.currency_pair(ccy_pair)
.currency(ccy)
.quantity(12345678901)
.create_subject()
)
if __name__ == "__main__":
main()
| 2.234375
| 2
|
{{cookiecutter.PROJECT_NAME}}/utils/drf/filters.py
|
pyFigure/cc_django
| 4
|
12777774
|
from django.db.models import Q
from model_utils.models import now
from rest_framework.filters import BaseFilterBackend
class OwnerFilter(BaseFilterBackend):
"""过滤属于当前用户的数据"""
def filter_queryset(self, request, queryset, view):
current = request.user
return queryset.filter(owner=current)
class TimeRangeFilter(BaseFilterBackend):
"""时间区间过滤器"""
def filter_queryset(self, request, queryset, view):
return queryset.filter(
Q(start__isnull=True) | Q(end__isnull=True) | Q(start__lte=now) | Q(end__gte=now)).filter(
~Q(start__gt=now)).filter(~Q(end__lt=now))
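# Hypothetical usage sketch (not part of the original file): these backends are meant to be
# listed in a DRF view's filter_backends, e.g.
#
#     class OrderViewSet(viewsets.ModelViewSet):
#         queryset = Order.objects.all()
#         serializer_class = OrderSerializer
#         filter_backends = [OwnerFilter, TimeRangeFilter]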
| 2.125
| 2
|
pimpd/widgets/textlist.py
|
eprst/pimpd
| 1
|
12777775
|
import math
from scrollingtext import ScrollingText
from widget import Widget
class TextList(Widget):
_text_margin = 1
_selected = None # type: None | int
def __init__(self, position, size, font, empty_items_text):
super(TextList, self).__init__(position, size)
self._font = font
        self._empty_items_text = empty_items_text
width = size[0]
height = size[1]
assert width > 0
assert height > 0
text_height = font.getsize("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz")[1]
assert text_height > 0
num_lines = int(math.ceil(float(height) / (text_height + self._text_margin)))
self._lines = []
for i in range(0, num_lines):
line_y = i * (text_height + self._text_margin)
self._lines.append(ScrollingText(
(self._text_margin, line_y),
(width - 2 * self._text_margin, text_height),
font,
""
))
self._items = []
self._selected_item = None
self._on_empty_items()
def _middle_line(self):
        return self._lines[(len(self._lines) - 1) // 2]
def _on_empty_items(self):
self._reset_lines()
        self._middle_line().set_text(self._empty_items_text)
self._middle_line().set_scroll(True)
self._top_line_idx = None
self._selected = None
def _reset_lines(self):
for l in self._lines:
l.set_text("")
l.set_invert(False)
l.set_draw_border(False)
l.set_scroll(False)
@property
def selected(self):
return self._selected
def set_selected(self, selected):
self._selected = max(0, min(selected, len(self._items) - 1))
def set_items(self, items):
self._items = items
if len(items) == 0:
self._selected = None
self._on_empty_items()
else:
# noinspection PyTypeChecker
if self._selected is None:
self._selected = 0
elif self._selected >= len(items):
self._selected = len(items) - 1
self._update_lines()
self._need_refresh = True
def select_next(self):
if self._selected is not None:
self._selected = (self._selected + 1) % len(self._items)
self._update_lines()
self._need_refresh = True
def select_previous(self):
if self._selected is not None:
self._selected -= 1
if self._selected < 0:
self._selected = len(self._items) + self._selected
self._update_lines()
self._need_refresh = True
def tick(self):
if self._selected_item is not None:
self._selected_item.tick()
def need_refresh(self):
if super(TextList, self).need_refresh():
return True
return self._selected_item is not None and self._selected_item.need_refresh()
def _update_lines(self):
self._reset_lines()
# 0, 1, 2, ..., k-1
# 0 <= n <= k // window size
# 0 <= i <= k
# find 0 <= s <= k such that:
# (s + n/2) mod k == i
# s + n/2 = x*k + i
k = len(self._items)
n = min(k, len(self._lines))
        s = self._selected - n // 2 + 1
# if s < 0:
# s += k
if s < 0:
s = 0
elif s + n > k:
s = k - n
# make bottom line fully visible if it is selected
text_height = self._lines[0].size[1]
num_lines = len(self._lines)
if k * (text_height + self._text_margin) > self.size[1]:
if self._selected == k - 1:
y = self.size[1] - text_height
for i in range(0, num_lines):
line = self._lines[num_lines - i - 1]
line.set_position((self._text_margin, y))
y -= text_height
y -= self._text_margin
else:
for i in range(0, num_lines):
y = i * (text_height + self._text_margin)
line = self._lines[i]
line.set_position((self._text_margin, y))
for i in range(0, n):
line = self._lines[i]
line.set_text(self._items[s])
line.set_scroll(s == self._selected)
line.set_invert(s == self._selected)
if s == self._selected:
self._selected_item = line
s = (s + 1) % k
def _draw(self, img, draw):
super(TextList, self)._draw(img, draw)
for l in self._lines:
l.refresh(img, draw)
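    # Hypothetical usage sketch (not from the original source); `font` would be a PIL
    # ImageFont instance:
    #   menu = TextList((0, 0), (128, 64), font, "No items")
    #   menu.set_items(["Artists", "Albums", "Playlists"])
    #   menu.select_next()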
| 3.5625
| 4
|
Ene-Jun-2019/Karla Berlanga/2do Parcial/Practica 3/database.py
|
Arbupa/DAS_Sistemas
| 41
|
12777776
|
<reponame>Arbupa/DAS_Sistemas<gh_stars>10-100
import sqlite3
from API_Marvel import Character
class DataBase():
def __init__(self, file):
self.connection = sqlite3.connect(file)
def CreateTable(self):
        # Create the table in the database
        cursor = self.connection.cursor()
        try:
            # Create the table only if it does not already exist
cursor.execute("""CREATE TABLE IF NOT EXISTS characters (id INTEGER PRIMARY KEY AUTOINCREMENT,
clave int,
name text,
description text,
url_detail text,
url_wiki text,
movie int
)""")
return ("LA TABLA characters SE CREÓ EXITOSAMENTE")
except sqlite3.OperationalError:
return ("OCURRIÓ UN ERROR")
self.connection.commit()
def SaveCharacter(self, character):
        # Get a cursor on the database connection
cursor = self.connection.cursor()
        # Check that the character to be inserted does not already exist in the database
        cursor.execute('''SELECT clave,movie FROM characters WHERE clave = "{}" and movie = "{}"'''.format(character.clave, character.movie))
        # Store the result of the previous query; it holds a matching row or None
        resultados = cursor.fetchone()
        if resultados is None:
            # If there is no match, insert the character
parametros = {
'CLAVE':character.clave,
'NAME': character.name,
'DESCRIPTION':character.description,
'URL_DETAIL': character.url_detail,
'URL_WIKI':character.url_wiki,
'MOVIE':int(character.movie)}
try:
cursor.execute( "INSERT INTO characters (id, clave, name, description, url_detail, url_wiki, movie) VALUES(null,:CLAVE,:NAME,:DESCRIPTION,:URL_DETAIL,:URL_WIKI,:MOVIE);", parametros)
print("EL PERSONAJE: {} SE INSERTÓ EXITOSAMENTE A LA BASE DE DATOS".format(character.name))
except:
return("ERROR AL INSERTAR EL PERSONAJE A LA BASE DE DATOS")
self.connection.commit()
cursor.execute("""SELECT * FROM characters WHERE clave = :CLAVE """, {'CLAVE': character.clave})
consulta = cursor.fetchall()
list_characters = []
for c in consulta:
character = Character(c[1], c[2], c[3], c[4], c[5], c[6])
list_characters.append(character)
return list_characters
else:
return "EL PERSONAJE {} YA EXISTE EN LA BASE DE DATOS".format(character.name)
    def ShowCharacters(self, movie):
        # Get a cursor on the database connection
        cursor = self.connection.cursor()
        list_characters = []
        # Query the characters of the given movie to check whether there are any records
        cursor.execute("""SELECT * FROM characters where movie={}""".format(movie))
        registros = cursor.fetchall()
        if registros == []:
            return "NO HAY REGISTROS EN LA BASE DE DATOS"
        for c in registros:
            character = Character(c[1], c[2], c[3], c[4], c[5], c[6])
            list_characters.append(character)
        return list_characters
if __name__ == '__main__':
db = DataBase('avengers.db')
#print(db.CreateTable())
#print(db.ShowCharacters(1))
pass
| 3.640625
| 4
|
examples/prime_numbers/ferma/ferma.py
|
Electro98/aads
| 7
|
12777777
|
"""Monty hall paradox
Wiki:
https://en.wikipedia.org/wiki/Fermat_primality_test
"""
import math
import random
def ferma(number: int, k: int = 100) -> bool:
"""Тест простоты Ферма
Wiki:
https://en.wikipedia.org/wiki/Fermat_primality_test
:param number: проверяемое число
:type number: int
:param k: количество тестов
:type k: int, default 100
:return: True если число псевдопростое, False если составное
:rtype: bool
"""
if number == 2:
return True
for _ in range(1, k + 1):
random_number = (random.randint(1, number) % (number - 2)) + 2
        # check that random_number and number are coprime
if math.gcd(random_number, number) != 1:
return False
        # check Fermat's little theorem condition using modular exponentiation
if pow(random_number, number - 1, number) != 1:
return False
return True
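

if __name__ == '__main__':
    # Minimal usage sketch added for illustration (not part of the original module):
    # run the test on a few sample values and print the verdicts.
    for candidate in (7, 15, 97, 561):
        print(candidate, 'passes the Fermat test:', ferma(candidate))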
| 4.28125
| 4
|
elvers/rules/paladin/paladin-align.py
|
dib-lab/2018-snakemake-eel-pond
| 12
|
12777778
|
"""Snakemake wrapper for PALADIN alignment"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, <NAME>"
__email__ = "<EMAIL>"
__license__ = "MIT"
from os import path
from snakemake.shell import shell
extra = snakemake.params.get("extra", "")
log = snakemake.log_fmt_shell(stdout=False, stderr=True)
r = snakemake.input.get("r")
assert r is not None, "reads are required as input"
index = snakemake.input.get("index")
assert index is not None, "please index your assembly and provide the basename (with '.bwt' extension) via the 'index' input param"
index_base = str(index).rsplit('.bwt')[0]
outfile = snakemake.output
# if bam output, pipe to bam!
output_cmd = " | samtools view -Sb - > " if str(outfile).endswith('.bam') else " -o "
min_orf_len = snakemake.params.get('f', '250')
shell("paladin align -f {min_orf_len} -t {snakemake.threads} {extra} {index_base} {r} {output_cmd} {outfile}")
| 2.15625
| 2
|
setup.py
|
laurent-radoux/multi_notifier
| 0
|
12777779
|
<filename>setup.py
#!/usr/bin/env python
"""The setup script."""
import pip
from setuptools import setup, find_packages
try: # for pip >= 10
from pip._internal.req import parse_requirements
except ImportError: # for pip <= 9.0.3
from pip.req import parse_requirements
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
## workaround derived from: https://github.com/pypa/pip/issues/7645#issuecomment-578210649
parsed_requirements = parse_requirements(
'requirements/prod.txt',
session='workaround'
)
parsed_test_requirements = parse_requirements(
'requirements/test.txt',
session='workaround'
)
try:
requirements = [str(ir.req) for ir in parsed_requirements]
test_requirements = [str(tr.req) for tr in parsed_test_requirements]
except AttributeError:
requirements = [str(ir.requirement) for ir in parsed_requirements]
test_requirements = [str(tr.requirement) for tr in parsed_test_requirements]
setup(
author="<NAME>",
author_email='<EMAIL>',
python_requires='>=3.6',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
description="Python package that allows notifying a message via one of mulitple notification services",
entry_points={
'console_scripts': [
'multi_notifier=multi_notifier.cli:cli',
],
},
install_requires=requirements,
license="MIT license",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='multi_notifier',
name='multi_notifier',
packages=find_packages(include=['multi_notifier', 'multi_notifier.*']),
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/laurent-radoux/multi_notifier',
version='0.1.0',
zip_safe=False,
)
| 2.046875
| 2
|
mach_cad/tools/magnet/document/__init__.py
|
Severson-Group/MachEval
| 6
|
12777780
|
from . import document
from . import view
from .document import *
from .view import *
__all__ = []
__all__ += document.__all__
__all__ += view.__all__
| 1.226563
| 1
|
py/math/src/multivariate_distribution2.py
|
LightSun/study_pcl
| 0
|
12777781
|
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline  (figure scaling is set below)
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = (12, 8)
# Normally distributed x and y vectors with mean 0 and standard deviation 1
x = np.random.normal(0, 1, 200)
y = np.random.normal(0, 1, 200)
X = np.vstack((x, y)) # 2xn
# Scaling
sx, sy = 0.5, 2.0
Scale = np.array([[sx, 0], [0, sy]])
Y = Scale.dot(X)
# Original point set
plt.scatter(X[0, :], X[1, :])
# Scaled points
plt.scatter(Y[0, :], Y[1, :])
plt.title('Generated Data')
plt.axis('equal')
plt.show()
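# Hypothetical extension (not in the original script): the sample covariance of the scaled
# cloud should be close to Scale @ Scale.T, i.e. roughly [[0.25, 0], [0, 4.0]].
# print(np.cov(Y))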
| 2.984375
| 3
|
tests/spider_error.py
|
alexey-v-paramonov/grab
| 0
|
12777782
|
import mock
from six import StringIO
from grab import GrabTimeoutError, Grab
from grab.spider import Spider, Task
from tests.util import BaseGrabTestCase, build_spider, run_test_if, GLOBAL
# That URLs breaks Grab's URL normalization process
# with error "label empty or too long"
INVALID_URL = 'http://13354&altProductId=6423589&productId=6423589'\
'&altProductStoreId=13713&catalogId=10001'\
'&categoryId=28678&productStoreId=13713'\
'http://www.textbooksnow.com/webapp/wcs/stores'\
'/servlet/ProductDisplay?langId=-1&storeId='
class SpiderErrorTestCase(BaseGrabTestCase):
def setUp(self):
self.server.reset()
def test_generator_with_invalid_url(self):
class SomeSpider(Spider):
def task_generator(self):
yield Task('page', url=INVALID_URL)
bot = build_spider(SomeSpider)
bot.run()
def test_redirect_with_invalid_url(self):
server = self.server
class TestSpider(Spider):
def task_generator(self):
# pylint: disable=attribute-defined-outside-init
self.done_counter = 0
# pylint: enable=attribute-defined-outside-init
yield Task('page', url=server.get_url())
def task_page(self, grab, task):
pass
self.server.response_once['code'] = 301
self.server.response_once['headers'] = [
('Location', INVALID_URL),
]
bot = build_spider(TestSpider, network_try_limit=1)
bot.run()
# FIXME: fix
# That test case ruins the spider instance :(
#def test_redirect_with_invalid_byte(self):
# url = self.server.get_url()
# invalid_url = b'http://\xa0' + url.encode('ascii')
# def callback(server):
# server.set_status(301)
# server.add_header('Location', invalid_url)
# server.write('')
# server.finish()
# class TestSpider(Spider):
# def task_generator(self):
# #yield Task('page', url='http://www.tripadvisor.com/ShowUrl?
# #&excludeFromVS=false&odc=BusinessListingsUrl&d=4289178&url=1')
# #yield Task('page', invalid_url)
# yield Task('page', url)
#
# def task_page(self, grab, task):
# pass
#
# self.server.response['callback'] = callback
# bot = build_spider(TestSpider)
# bot.run()
def test_no_warning(self):
"""Simple spider should not generate
any warnings (warning module sends messages to stderr)
"""
out = StringIO()
with mock.patch('sys.stderr', out):
server = self.server
server.response['data'] = b'<div>test</div>'
class SimpleSpider(Spider):
# pylint: disable=unused-argument
initial_urls = [server.get_url()]
def task_initial(self, grab, task):
yield Task('more', url=server.get_url())
def task_more(self, grab, task):
#print(grab.doc.url)
grab.doc('//div').text()
bot = build_spider(SimpleSpider)
bot.run()
self.assertTrue(out.getvalue() == '')
def test_grab_attribute_exception(self):
server = self.server
server.response['sleep'] = 2
class SimpleSpider(Spider):
def task_generator(self):
grab = Grab()
grab.setup(url=server.get_url(),
timeout=1)
yield Task('page', grab=grab,
raw=True)
def task_page(self, grab, unused_task):
self.meta['exc'] = grab.exception
bot = build_spider(SimpleSpider)
bot.run()
self.assertTrue(isinstance(bot.meta['exc'], GrabTimeoutError))
@run_test_if(lambda: (GLOBAL['network_service'] == 'multicurl'
and GLOBAL['grab_transport'] == 'pycurl'),
'multicurl & pycurl')
def test_stat_error_name_multi_pycurl(self):
server = self.server
server.response['sleep'] = 2
class SimpleSpider(Spider):
def prepare(self):
self.network_try_limit = 1
def task_generator(self):
grab = Grab(url=server.get_url(), timeout=1)
yield Task('page', grab=grab)
def task_page(self, grab, unused_task):
pass
bot = build_spider(SimpleSpider)
bot.run()
self.assertTrue('error:operation-timeouted' in bot.stat.counters)
@run_test_if(lambda: (GLOBAL['network_service'] == 'threaded'
and GLOBAL['grab_transport'] == 'pycurl'),
'threaded & pycurl')
def test_stat_error_name_threaded_pycurl(self):
server = self.server
server.response['sleep'] = 2
class SimpleSpider(Spider):
def prepare(self):
self.network_try_limit = 1
def task_generator(self):
grab = Grab(url=server.get_url(), timeout=1)
yield Task('page', grab=grab)
def task_page(self, grab, unused_task):
pass
bot = build_spider(SimpleSpider)
bot.run()
print(bot.stat.counters)
self.assertTrue('error:grab-timeout-error' in bot.stat.counters)
@run_test_if(lambda: (GLOBAL['network_service'] == 'threaded'
and GLOBAL['grab_transport'] == 'urllib3'),
'threaded & urllib3')
def test_stat_error_name_threaded_urllib3(self):
server = self.server
server.response['sleep'] = 2
class SimpleSpider(Spider):
def prepare(self):
self.network_try_limit = 1
def task_generator(self):
grab = Grab(url=server.get_url(), timeout=1)
yield Task('page', grab=grab)
def task_page(self, grab, unused_task):
pass
bot = build_spider(SimpleSpider)
bot.run()
self.assertTrue('error:read-timeout-error' in bot.stat.counters)
| 2.328125
| 2
|
setup.py
|
snowdj/gluon-tutorials-zh
| 62
|
12777783
|
<filename>setup.py<gh_stars>10-100
#!/usr/bin/env python
import io
import os
import re
from setuptools import setup, find_packages
def read(*names, **kwargs):
with io.open(
os.path.join(os.path.dirname(__file__), *names),
encoding=kwargs.get("encoding", "utf8")
) as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
VERSION = find_version('d2lzh', '__init__.py')
requirements = [
'numpy',
'matplotlib',
'jupyter'
]
setup(
# Metadata
name='d2lzh',
version=VERSION,
author='Contributors',
author_email='<EMAIL>',
url='https://zh.d2l.ai',
description='Dive into Deep Learning (in Chinese) Utils',
long_description='Dive into Deep Learning Book (in Chinese) Utilities',
license='Apache-2.0',
# Package info
packages=find_packages(exclude=('tests',)),
zip_safe=True,
install_requires=requirements,
)
| 1.835938
| 2
|
setup.py
|
triptitripathi/clocwalk
| 11
|
12777784
|
<reponame>triptitripathi/clocwalk
#!/usr/bin/env python
# coding: utf-8
from setuptools import setup, find_packages
from clocwalk import __version__
setup(
name='clocwalk',
version=__version__,
description='Project code and dependent component analysis tools.',
author='MyKings',
author_email='<EMAIL>',
url='https://github.com/MyKings/clocwalk',
packages=find_packages(),
include_package_data=True,
entry_points={
"console_scripts": [
"clocwalk = clocwalk.cli:main"
]
},
install_requires=[
'lxml',
'requests',
'requests[socks]',
'PyYAML',
],
extras_require={
'dev': [
'devpi>=2.1.0',
'prospector>=0.10.1',
],
'test': [
'coverage>=3.7.1',
'nose>=1.3.6',
],
'docs': [
'Sphinx>=1.3.1',
],
'build': [
'devpi>=2.1.0',
],
},
test_suite='nose.collector',
zip_safe=False,
)
| 1.195313
| 1
|
app/auth/forms.py
|
marknesh/Blogging-website
| 0
|
12777785
|
from flask_wtf import FlaskForm
from wtforms import StringField,PasswordField,SubmitField,BooleanField
from wtforms.validators import DataRequired,EqualTo,Email
from ..models import User
from wtforms import ValidationError
class RegistrationForm(FlaskForm):
email=StringField('Your email address',validators=[DataRequired(),Email()])
username=StringField('your username',validators=[DataRequired()])
password=PasswordField('Your password',validators=[DataRequired(),EqualTo('password_confirm',message='password must match')])
password_confirm=PasswordField('Confirm Passwords',validators=[DataRequired()])
submit=SubmitField('submit')
def validate_email(self,data_field):
if User.query.filter_by(email=data_field.data).first():
raise ValidationError("email already exists")
def validate_username(self,data_field):
if User.query.filter_by(username=data_field.data).first():
raise ValidationError("username already exists")
class LoginForm(FlaskForm):
email=StringField('Your email address',validators=[DataRequired(),Email()])
password=PasswordField('Password',validators=[DataRequired()])
remember=BooleanField('remember me')
submit=SubmitField('submit')
class BlogForm(FlaskForm):
title=StringField('Your Title',validators=[DataRequired()])
content=StringField('Content',validators=[DataRequired()])
submit = SubmitField('submit')
class CommentForm(FlaskForm):
comment=StringField('comment',validators=[DataRequired()])
submit = SubmitField('submit')
class SubscriberForm(FlaskForm):
email = StringField('Your email address', validators=[DataRequired(), Email()])
submit = SubmitField('submit')
| 3.046875
| 3
|
epde/structure.py
|
vnleonenko/EPDE
| 15
|
12777786
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 11 16:22:17 2021
@author: mike_ubuntu
"""
import numpy as np
from functools import reduce
import copy
import gc
import time
import datetime
import pickle
import warnings
import epde.globals as global_var
import torch
from epde.decorators import History_Extender, Reset_equation_status
from epde.interface.token_family import TF_Pool
from epde.factor import Factor
from epde.supplementary import Filter_powers, Population_Sort, flatten
import epde.moeadd.moeadd_stc as moeadd
def Check_Unqueness(obj, background):
return not any([elem == obj for elem in background])
def normalize_ts(Input):
# print('normalize_ts Input:', Input)
matrix = np.copy(Input)
# print(Matrix.shape)
if np.ndim(matrix) == 0:
        raise ValueError('Incorrect input to the normalization: the data has 0 dimensions')
elif np.ndim(matrix) == 1:
return matrix
else:
for i in np.arange(matrix.shape[0]):
# print(matrix[i].shape)
std = np.std(matrix[i])
if std != 0:
matrix[i] = (matrix[i] - np.mean(matrix[i])) / std
else:
matrix[i] = 1
return matrix
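# Hypothetical worked example (not from the original source): each row of a 2-D input is
# standardized independently, e.g.
#   normalize_ts(np.array([[1., 2., 3.], [5., 5., 5.]]))
#   -> row 0 becomes ~[-1.2247, 0.0, 1.2247] (zero mean, unit std); row 1 (zero std) becomes [1., 1., 1.]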
class Complex_Structure(object):
def __init__(self, interelement_operator = np.add, *params):
self._history = ''
self.structure = None
self.interelement_operator = interelement_operator
def __eq__(self, other):
if type(other) != type(self):
raise ValueError('Type of self and other are different')
return (all([any([other_elem == self_elem for other_elem in other.structure]) for self_elem in self.structure]) and
all([any([other_elem == self_elem for self_elem in self.structure]) for other_elem in other.structure]) and
len(other.structure) == len(self.structure))
def set_evaluator(self, evaluator):
raise NotImplementedError('Functionality of this method has been moved to the evolutionary operator declaration')
def evaluate(self, structural = False):
assert len(self.structure) > 0, 'Attempt to evaluate an empty complex structure'
if len(self.structure) == 1:
return self.structure[0].evaluate(structural)
else:
# print([type(elem) for elem in self.structure])
return reduce(lambda x, y: self.interelement_operator(x, y.evaluate(structural)),
self.structure[1:], self.structure[0].evaluate(structural))
def reset_saved_state(self):
self.saved = {True:False, False:False}
self.saved_as = {True:None, False:None}
for elem in self.structure:
elem.reset_saved_state()
@property
def name(self):
pass
class Term(Complex_Structure):
__slots__ = ['_history', 'structure', 'interelement_operator', 'saved', 'saved_as',
'pool', 'max_factors_in_term', 'cache_linked', 'occupied_tokens_labels']
def __init__(self, pool, passed_term = None, max_factors_in_term = 1, forbidden_tokens = None,
interelement_operator = np.multiply):
super().__init__(interelement_operator)
self.pool = pool
self.max_factors_in_term = max_factors_in_term
if type(passed_term) == type(None):
self.Randomize(forbidden_tokens)
else:
self.Defined(passed_term)
if type(global_var.tensor_cache) != type(None):
self.use_cache()
self.reset_saved_state() # key - state of normalization, value - if the variable is saved in cache
@property
def cache_label(self):
if len(self.structure) > 1:
structure_sorted = sorted(self.structure, key = lambda x: x.cache_label)
cache_label = tuple([elem.cache_label for elem in structure_sorted])#reduce(form_label, structure_sorted, '')
else:
cache_label = self.structure[0].cache_label
return cache_label
def use_cache(self):
self.cache_linked = True
# print('structure:', self.structure)
for idx, _ in enumerate(self.structure):
if not self.structure[idx].cache_linked:
self.structure[idx].use_cache()
def Defined(self, passed_term):
self.structure = []
print('passed_term:', passed_term)
if isinstance(passed_term, (list, tuple)): #type(passed_term) == list or type(passed_term) == tuple:
for i, factor in enumerate(passed_term):
if type(factor) == str:
_, temp_f = self.pool.create(label = factor)
self.structure.append(temp_f)#; raise NotImplementedError
elif type(factor) == Factor:
self.structure.append(factor)
else:
raise ValueError('The structure of a term should be declared with str or factor.Factor obj, instead got', type(factor))
        else: # Case when only a single token is passed
if type(passed_term) == str:
_, temp_f = self.pool.create(label = passed_term)
self.structure.append(temp_f)#; raise NotImplementedError
elif type(passed_term) == Factor:
self.structure.append(passed_term)
else:
raise ValueError('The structure of a term should be declared with str or factor.Factor obj, instead got', type(passed_term))
def Randomize(self, forbidden_factors = None, **kwargs):
if np.sum(self.pool.families_cardinality(meaningful_only = True)) == 0:
raise ValueError('No token families are declared as meaningful for the process of the system search')
factors_num = np.random.randint(1, self.max_factors_in_term +1)
while True:
self.occupied_tokens_labels = []
occupied_by_factor, factor = self.pool.create(label = None, create_meaningful = True,
occupied = self.occupied_tokens_labels, **kwargs)
self.structure = [factor,]
self.occupied_tokens_labels.extend(occupied_by_factor)
factors_powers = {factor.label : 1}
for i in np.arange(1, factors_num):
occupied_by_factor, factor = self.pool.create(label = None, create_meaningful = False,
occupied = self.occupied_tokens_labels,
def_term_tokens = [token.label for token in self.structure],
**kwargs)
if factor.label in factors_powers:
factors_powers[factor.label] += 1
else:
factors_powers[factor.label] = 1
for param_idx, param_descr in factor.params_description.items():
if param_descr['name'] == 'power': power_param_idx = param_idx
if factors_powers[factor.label] == factor.params_description[power_param_idx]['bounds'][1]:
self.occupied_tokens_labels.append(factor.label)
self.structure.append(factor)
self.occupied_tokens_labels.extend(occupied_by_factor)
self.structure = Filter_powers(self.structure)
if type(forbidden_factors) == type(None):
break
elif all([(Check_Unqueness(factor, forbidden_factors) or not factor.status['unique_for_right_part']) for factor in self.structure]):
break
def evaluate(self, structural):
assert type(global_var.tensor_cache) != type(None), 'Currently working only with connected cache'
normalize = structural
if self.saved[structural] or (self.cache_label, normalize) in global_var.tensor_cache: #
value = global_var.tensor_cache.get(self.cache_label, normalized = normalize,
saved_as = self.saved_as[normalize])
value = value.reshape(value.size)
return value
else:
self.prev_normalized = normalize
value = super().evaluate(structural)
if normalize and np.ndim(value) != 1:
value = normalize_ts(value)
elif normalize and np.ndim(value) == 1 and np.std(value) != 0:
value = (value - np.mean(value))/np.std(value)
elif normalize and np.ndim(value) == 1 and np.std(value) == 0:
value = (value - np.mean(value))
if np.all([len(factor.params) == 1 for factor in self.structure]):
                self.saved[normalize] = global_var.tensor_cache.add(self.cache_label, value, normalized = normalize) # Possible trouble spot: saving/loading of normalized data
if self.saved[normalize]: self.saved_as[normalize] = self.cache_label
value = value.reshape(value.size)
return value
def Filter_tokens_by_right_part(self, reference_target, equation, equation_position):
taken_tokens = [factor.label for factor in reference_target.structure if factor.status['unique_for_right_part']]
meaningful_taken = any([factor.status['meaningful'] for factor in reference_target.structure
if factor.status['unique_for_right_part']])
accept_term_try = 0
while True:
accept_term_try += 1
new_term = copy.deepcopy(self)
for factor_idx, factor in enumerate(new_term.structure):
if factor.label in taken_tokens:
new_term.Reset_occupied_tokens()
_, new_term.structure[factor_idx] = self.pool.create(create_meaningful=meaningful_taken,
occupied = new_term.occupied_tokens_labels + taken_tokens)
# print('try:', accept_term_try, 'suggested:', new_term.name)
            # Possible error due to (not) using "create_meaningful"
if Check_Unqueness(new_term, equation.structure[:equation_position] +
equation.structure[equation_position + 1 :]):
self.structure = new_term.structure
self.structure = Filter_powers(self.structure)
self.reset_saved_state()
break
if accept_term_try == 10:
warnings.warn('Can not create unique term, while filtering equation tokens in regards to the right part.')
if accept_term_try >= 10:
self.Randomize(forbidden_factors = new_term.occupied_tokens_labels + taken_tokens)
if accept_term_try == 100:
print('Something wrong with the random generation of term while running "Filter_tokens_by_right_part"')
print('proposed', new_term.name, 'for ', equation.text_form, 'with respect to', reference_target.name)
def Reset_occupied_tokens(self):
occupied_tokens_new = []
for factor in self.structure:
for token_family in self.pool.families:
if factor in token_family.tokens and factor.status['unique_token_type']:
occupied_tokens_new.extend([token for token in token_family.tokens])
elif factor.status['unique_specific_token']:
occupied_tokens_new.append(factor.label)
self.occupied_tokens_labels = occupied_tokens_new
@property
    def available_tokens(self): # To be reworked, since the token pool changes; the old implementation used a list
available_tokens = []
for token in self.pool.families:
if not all([label in self.occupied_tokens_labels for label in token.tokens]):
token_new = copy.deepcopy(token)
token_new.tokens = [label for label in token.tokens if label not in self.occupied_tokens_labels]
available_tokens.append(token_new)
return available_tokens
@property
def total_params(self):
return max(sum([len(element.params) - 1 for element in self.structure]), 1)
@property
def name(self):
form = ''
for token_idx in range(len(self.structure)):
form += self.structure[token_idx].name
if token_idx < len(self.structure) - 1:
form += ' * '
return form
@property
def contains_deriv(self):
return any([factor.is_deriv and factor.deriv_code != [None,] for factor in self.structure])
@property
def solver_form(self):
deriv_orders = []
deriv_powers = []
try:
coeff_tensor = np.ones_like(global_var.grid_cache.get('0'))
except KeyError:
raise NotImplementedError('No cache implemented')
for factor in self.structure:
if factor.is_deriv:
for param_idx, param_descr in factor.params_description.items():
if param_descr['name'] == 'power': power_param_idx = param_idx
deriv_orders.append(factor.deriv_code); deriv_powers.append(factor.params[power_param_idx])
else:
coeff_tensor = coeff_tensor * factor.evaluate()
# deriv_orders.append(factor.deriv_code); deriv_powers.append(1)
if len(deriv_powers) == 1:
deriv_powers = deriv_powers[0]
deriv_orders = deriv_orders[0]
coeff_tensor = torch.from_numpy(coeff_tensor)
return [coeff_tensor, deriv_orders, deriv_powers]
def __eq__(self, other):
return (all([any([other_elem == self_elem for other_elem in other.structure]) for self_elem in self.structure]) and
all([any([other_elem == self_elem for self_elem in self.structure]) for other_elem in other.structure]) and
len(other.structure) == len(self.structure))
class Equation(Complex_Structure):
__slots__ = ['_history', 'structure', 'interelement_operator', 'saved', 'saved_as',
'n_immutable', 'pool', 'terms_number', 'max_factors_in_term', 'operator',
'_target', 'target_idx', '_features', 'right_part_selected',
'_weights_final', 'weights_final_evald', '_weights_internal', 'weights_internal_evald',
'fitness_calculated', 'solver_form_defined', '_solver_form', '_fitness_value',
'crossover_selected_times', 'elite']
def __init__(self, pool, basic_structure, terms_number = 6, max_factors_in_term = 2,
interelement_operator = np.add): #eq_weights_eval
"""
Class for the single equation for the dynamic system.
attributes:
structure : list of Term objects \r\n
List, containing all terms of the equation; first 2 terms are reserved for constant value and the input function;
target_idx : int \r\n
Index of the target term, selected in the Split phase;
target : 1-d array of float \r\n
values of the Term object, reshaped into 1-d array, designated as target for application in sparse regression;
features : matrix of float \r\n
matrix, composed of terms, not included in target, value columns, designated as features for application in sparse regression;
fitness_value : float \r\n
                Inverse value of the squared error for the selected target function, features and discovered weights;
estimator : sklearn estimator of selected type \r\n
parameters:
Matrix of derivatives: first axis through various orders/coordinates in order: ['1', 'f', all derivatives by one coordinate axis
in increasing order, ...]; second axis: time, further - spatial coordinates;
tokens : list of strings \r\n
Symbolic forms of functions, including derivatives;
terms_number : int, base value of 6 \r\n
Maximum number of terms in the discovered equation;
max_factors_in_term : int, base value of 2\r\n
Maximum number of factors, that can form a term (e.g. with 2: df/dx_1 * df/dx_2)
"""
super().__init__(interelement_operator)
self.reset_state()
self.n_immutable = len(basic_structure)
# print('n_immutable', self.n_immutable)
self.pool = pool
self.structure = []
self.terms_number = terms_number; self.max_factors_in_term = max_factors_in_term
self.operator = None
if (terms_number < self.n_immutable):
raise Exception('Number of terms ({}) is too low to even contain all of the pre-determined ones'.format(terms_number))
for passed_term in basic_structure:
if isinstance(passed_term, Term):
self.structure.append(passed_term)
elif isinstance(passed_term, str):
self.structure.append(Term(self.pool, passed_term = passed_term,
max_factors_in_term = self.max_factors_in_term))
for i in range(len(basic_structure), terms_number):
check_test = 0
while True:
check_test += 1
new_term = Term(self.pool, max_factors_in_term = self.max_factors_in_term, passed_term = None)
if Check_Unqueness(new_term, self.structure):
break
self.structure.append(new_term)
for idx, _ in enumerate(self.structure):
self.structure[idx].use_cache()
@property
def contains_deriv(self):
return any([term.contains_deriv for term in self.structure])
@property
def forbidden_token_labels(self):
target_symbolic = [factor.label for factor in self.structure[self.target_idx].structure]
forbidden_tokens = set()
for token_family in self.pool.families:
for token in token_family.tokens:
if token in target_symbolic and token_family.status['unique_for_right_part']:
forbidden_tokens.add(token)
return forbidden_tokens
def reconstruct_to_contain_deriv(self):
while True:
replacement_idx = np.random.randint(low = 0, high = len(self.structure))
temp = Term(self.pool, max_factors_in_term=self.max_factors_in_term) # , forbidden_tokens=self.forbidden_token_labels
if temp.contains_deriv:
self.structure[replacement_idx] = temp
break
def reconstruct_by_right_part(self, right_part_idx):
new_eq = copy.deepcopy(self)
self.copy_properties_to(new_eq)
new_eq.target_idx = right_part_idx
if any([factor.status['unique_for_right_part'] for factor in new_eq.structure[right_part_idx].structure]):
for term_idx, term in enumerate(new_eq.structure):
if term_idx != right_part_idx:
term.Filter_tokens_by_right_part(new_eq.structure[right_part_idx], self, term_idx)
new_eq.reset_saved_state()
return new_eq
def evaluate(self, normalize = True, return_val = False, save = True):
self._target = self.structure[self.target_idx].evaluate(normalize)
feature_indexes = list(range(len(self.structure)))
feature_indexes.remove(self.target_idx)
for feat_idx in range(len(feature_indexes)):
if feat_idx == 0:
self._features = self.structure[feature_indexes[feat_idx]].evaluate(normalize)
elif feat_idx != 0:
temp = self.structure[feature_indexes[feat_idx]].evaluate(normalize)
self._features = np.vstack([self._features, temp])
else:
continue
if self._features.ndim == 1:
self._features = np.expand_dims(self._features, 1)
temp_feats = np.vstack([self._features, np.ones(self._features.shape[1])])
self._features = np.transpose(self._features); temp_feats = np.transpose(temp_feats)
if return_val:
self.prev_normalized = normalize
if normalize:
elem1 = np.expand_dims(self._target, axis = 1)
value = np.add(elem1, - reduce(lambda x,y: np.add(x, y), [np.multiply(weight, temp_feats[:,feature_idx])
for feature_idx, weight in np.ndenumerate(self.weights_internal)]))
else:
elem1 = np.expand_dims(self._target, axis = 1)
value = np.add(elem1, - reduce(lambda x,y: np.add(x, y), [np.multiply(weight, temp_feats[:,feature_idx])
for feature_idx, weight in np.ndenumerate(self.weights_final)]))
return value, self._target, self._features
else:
return None, self._target, self._features
def reset_state(self, reset_right_part : bool = True):
if reset_right_part: self.right_part_selected = False
self.weights_internal_evald = False
self.weights_final_evald = False
self.fitness_calculated = False
self.solver_form_defined = False
@Reset_equation_status(reset_input = False, reset_output = True)
@History_Extender('\n -> was copied by deepcopy(self)', 'n')
def __deepcopy__(self, memo = None):
clss = self.__class__
new_struct = clss.__new__(clss)
memo[id(self)] = new_struct
attrs_to_avoid_copy = ['_features', '_target']
# print(self.__slots__)
for k in self.__slots__:
try:
# print('successful writing', k, getattr(self, k))
if k not in attrs_to_avoid_copy:
setattr(new_struct, k, copy.deepcopy(getattr(self, k), memo))
else:
setattr(new_struct, k, None)
except AttributeError:
# print('unsuccessful writing', k)
pass
# new_struct.__dict__.update(self.__dict__)
# new_struct.structure = copy.deepcopy(self.structure)
return new_struct
def copy_properties_to(self, new_equation):
new_equation.weights_internal_evald = self.weights_internal_evald
new_equation.weights_final_evald = self.weights_final_evald
new_equation.right_part_selected = self.right_part_selected
new_equation.fitness_calculated = self.fitness_calculated
new_equation.solver_form_defined = self.solver_form_defined
try:
new_equation._fitness_value = self._fitness_value
except AttributeError:
pass
def add_history(self, add):
self._history += add
@property
def history(self):
return self._history
@property
def fitness_value(self):
return self._fitness_value
@fitness_value.setter
def fitness_value(self, val):
self._fitness_value = val
def penalize_fitness(self, coeff = 1.):
self._fitness_value = self._fitness_value*coeff
@property
def weights_internal(self):
if self.weights_internal_evald:
return self._weights_internal
else:
raise AttributeError('Internal weights called before initialization')
@weights_internal.setter
def weights_internal(self, weights):
self._weights_internal = weights
self.weights_internal_evald = True
self.weights_final_evald = False
@property
def weights_final(self):
if self.weights_final_evald:
return self._weights_final
else:
raise AttributeError('Final weights called before initialization')
@weights_final.setter
def weights_final(self, weights):
self._weights_final = weights
self.weights_final_evald = True
@property
def latex_form(self):
form = r""
for term_idx in range(len(self.structure)):
if term_idx != self.target_idx:
form += str(self.weights_final[term_idx]) if term_idx < self.target_idx else str(self.weights_final[term_idx-1])
form += ' * ' + self.structure[term_idx].latex_form + ' + '
form += str(self.weights_final[-1]) + ' = ' + self.structure[self.target_idx].text_form
return form
@property
def text_form(self):
form = ''
if self.weights_final_evald:
for term_idx in range(len(self.structure)):
if term_idx != self.target_idx:
form += str(self.weights_final[term_idx]) if term_idx < self.target_idx else str(self.weights_final[term_idx-1])
form += ' * ' + self.structure[term_idx].name + ' + '
form += str(self.weights_final[-1]) + ' = ' + self.structure[self.target_idx].name
else:
for term_idx in range(len(self.structure)):
form += 'k_'+ str(term_idx) + ' ' + self.structure[term_idx].name + ' + '
form += 'k_' + str(len(self.structure)) + ' = 0'
return form
def solver_form(self):
if self.solver_form_defined:
return self._solver_form
else:
self._solver_form = []
for term_idx in range(len(self.structure)):
if term_idx != self.target_idx:
term_form = self.structure[term_idx].solver_form
weight = self.weights_final[term_idx] if term_idx < self.target_idx else self.weights_final[term_idx-1]
term_form[0] = term_form[0] * weight
term_form[0] = torch.flatten(term_form[0]).unsqueeze(1).type(torch.FloatTensor)
self._solver_form.append(term_form)
free_coeff_weight = torch.from_numpy(np.full_like(a = global_var.grid_cache.get('0'),
fill_value = self.weights_final[-1]))
free_coeff_weight = torch.flatten(free_coeff_weight).unsqueeze(1).type(torch.FloatTensor)
target_weight = torch.from_numpy(np.full_like(a = global_var.grid_cache.get('0'),
fill_value = -1))
target_form = self.structure[self.target_idx].solver_form
target_form[0] = target_form[0] * target_weight
target_form[0] = torch.flatten(target_form[0]).unsqueeze(1).type(torch.FloatTensor)
self._solver_form.append([free_coeff_weight, [None,], 0])
self._solver_form.append(target_form)
self.solver_form_defined = True
return self._solver_form
@property
def state(self):
return self.text_form
@property
def described_variables(self):
eps=1e-7
described = set()
for term_idx, term in enumerate(self.structure):
if term_idx == self.target_idx:
described.update({factor.type for factor in term.structure})
else:
                weight_idx = term_idx if term_idx < self.target_idx else term_idx - 1
if np.abs(self.weights_final[weight_idx]) > eps:
described.update({factor.type for factor in term.structure})
described = frozenset(described)
return described
def max_deriv_orders(self):
solver_form = self.solver_form()
max_orders = np.zeros(global_var.grid_cache.get('0').ndim)
def count_order(obj, deriv_ax):
if obj is None:
return 0
else:
return obj.count(deriv_ax)
for term in solver_form:
if isinstance(term[2], list):
for deriv_factor in term[1]:
orders = np.array([count_order(deriv_factor, ax) for ax #deriv_factor.count(ax)
in np.arange(max_orders.size)])
max_orders = np.maximum(max_orders, orders)
else:
orders = np.array([count_order(term[1], ax) for ax # term[1].count(ax)
in np.arange(max_orders.size)])
max_orders = np.maximum(max_orders, orders)
if np.max(max_orders) > 2:
            raise NotImplementedError('The current implementation does not allow equation orders higher than 2.')
return max_orders
def boundary_conditions(self, main_var_key = ('u', (1.0,))):
required_bc_ord = self.max_deriv_orders() # We assume, that the maximum order of the equation here is 2
if global_var.grid_cache is None:
raise NameError('Grid cache has not been initialized yet.')
bconds = []
hardcoded_bc_relative_locations = {0 : None, 1 : (0,), 2 : (0, 1)}
tensor_shape = global_var.grid_cache.get('0').shape
def get_boundary_ind(tensor_shape, axis, rel_loc):
return tuple(np.meshgrid(*[np.arange(shape) if dim_idx != axis else min(int(rel_loc * shape), shape-1)
for dim_idx, shape in enumerate(tensor_shape)], indexing = 'ij'))
for ax_idx, ax_ord in enumerate(required_bc_ord):
for loc_fraction in hardcoded_bc_relative_locations[ax_ord]:
indexes = get_boundary_ind(tensor_shape, axis = ax_idx, rel_loc = loc_fraction)
coords = np.squeeze(np.array([global_var.grid_cache.get(str(idx))[indexes] for idx in np.arange(len(tensor_shape))])).T
vals = np.squeeze(global_var.tensor_cache.get(main_var_key)[indexes]).T
coords = torch.from_numpy(coords).type(torch.FloatTensor)
vals = torch.from_numpy(vals).type(torch.FloatTensor)
bconds.append([coords, vals])
print('shape of the grid', global_var.grid_cache.get('0').shape)
print('Obtained boundary conditions', len(bconds[0]))
return bconds
def standalone_boundary_conditions(max_deriv_orders, main_var_key = ('u', (1.0,))):
required_bc_ord = max_deriv_orders # We assume, that the maximum order of the equation here is 2
if global_var.grid_cache is None:
raise NameError('Grid cache has not been initialized yet.')
bconds = []
hardcoded_bc_relative_locations = {0 : None, 1 : (0,), 2 : (0, 1)}
tensor_shape = global_var.grid_cache.get('0').shape
def get_boundary_ind(tensor_shape, axis, rel_loc, old_way = False):
return tuple(np.meshgrid(*[np.arange(shape) if dim_idx != axis else min(int(rel_loc * shape), shape-1)
for dim_idx, shape in enumerate(tensor_shape)], indexing = 'ij'))
for ax_idx, ax_ord in enumerate(required_bc_ord):
for loc_fraction in hardcoded_bc_relative_locations[ax_ord]:
indexes = get_boundary_ind(tensor_shape, axis = ax_idx, rel_loc = loc_fraction)
coords = np.array([global_var.grid_cache.get(str(idx))[indexes] for idx in np.arange(len(tensor_shape))])
vals = global_var.tensor_cache.get(main_var_key)[indexes]
coords = torch.from_numpy(coords)
vals = torch.from_numpy(vals)
bconds.append([coords, vals])
return bconds
class SoEq(Complex_Structure, moeadd.moeadd_solution):
# __slots__ = ['tokens_indep', 'tokens_dep', 'equation_number']
def __init__(self, pool, terms_number, max_factors_in_term, sparcity = None, eq_search_iters = 100):
self.tokens_indep = TF_Pool(pool.families_meaningful) #[family for family in token_families if family.status['meaningful']]
self.tokens_dep = TF_Pool(pool.families_supplementary) #[family for family in token_families if not family.status['meaningful']]
self.equation_number = np.size(self.tokens_indep.families_cardinality())
        if sparcity is not None: self.vals = sparcity
self.max_terms_number = terms_number; self.max_factors_in_term = max_factors_in_term
self.moeadd_set = False; self.eq_search_operator_set = False# ; self.evaluated = False
self.def_eq_search_iters = eq_search_iters
def use_default_objective_function(self):
from epde.eq_mo_objectives import system_discrepancy, system_complexity_by_terms
self.set_objective_functions([system_discrepancy, system_complexity_by_terms])
def set_objective_functions(self, obj_funs):
'''
Method to set the objective functions to evaluate the "quality" of the system of equations.
Parameters:
-----------
obj_funs - callable or list of callables;
function/functions to evaluate quality metrics of system of equations. Can return a single
metric (for example, quality of the process modelling with specific system), or
a list of metrics (for example, number of terms for each equation in the system).
The function results will be flattened after their application.
'''
assert callable(obj_funs) or all([callable(fun) for fun in obj_funs])
self.obj_funs = obj_funs
# import time
# print(len(self.obj_funs))
# time.sleep(10)
def set_eq_search_evolutionary(self, evolutionary):
        # raise NotImplementedError('In the current version, the evolutionary operator shall be taken from global variables')
# assert type(evolutionary.coeff_calculator) != type(None), 'Defined evolutionary operator lacks coefficient calculator'
self.eq_search_evolutionary_strategy = evolutionary
self.eq_search_operator_set = True
def create_equations(self, population_size = 16, sparcity = None, eq_search_iters = None, EA_kwargs = dict()):
# if type(eq_search_iters) == type(None) and type(self.def_eq_search_iters) == type(None):
# raise ValueError('Number of iterations is not defied both in method parameter or in object attribute')
assert self.eq_search_operator_set
if type(eq_search_iters) == type(None): eq_search_iters = self.def_eq_search_iters
if type(sparcity) == type(None):
sparcity = self.vals
else:
self.vals = sparcity
self.population_size = population_size
self.eq_search_evolutionary_strategy.modify_block_params(block_label = 'truncation',
param_label = 'population_size',
value = population_size)
self.structure = []; self.eq_search_iters = eq_search_iters
token_selection = self.tokens_indep
self.vars_to_describe = {token_family.type for token_family in self.tokens_dep.families}
self.vars_to_describe = self.vars_to_describe.union({token_family.type for token_family in self.tokens_indep.families})
self.separated_vars = set()
for eq_idx in range(self.equation_number):
current_tokens = token_selection + self.tokens_dep
# print('Equation index', eq_idx, self.vals)
self.eq_search_evolutionary_strategy.modify_block_params(block_label = 'rps1', param_label = 'sparsity',
value = self.vals[eq_idx], suboperator_sequence = ['eq_level_rps', 'fitness_calculation', 'sparsity'])#(sparcity_value = self.vals[eq_idx])
self.eq_search_evolutionary_strategy.modify_block_params(block_label = 'rps2', param_label = 'sparsity',
value = self.vals[eq_idx], suboperator_sequence = ['eq_level_rps', 'fitness_calculation', 'sparsity'])#(sparcity_value = self.vals[eq_idx])
cur_equation, cur_eq_operator_error_abs, cur_eq_operator_error_structural = self.optimize_equation(pool = current_tokens,
strategy = self.eq_search_evolutionary_strategy, population_size = self.population_size,
separate_vars = self.separated_vars, EA_kwargs = EA_kwargs)
self.vars_to_describe.difference_update(cur_equation.described_variables)
self.separated_vars.add(frozenset(cur_equation.described_variables))
self.structure.append(cur_equation)
# self.single_vars_in_equation.update()
# cache.clear(full = False)
if not eq_idx == self.equation_number - 1:
global_var.tensor_cache.change_variables(cur_eq_operator_error_abs,
cur_eq_operator_error_structural)
# for idx, _ in enumerate(token_selection):
# token_selection[idx].change_variables(cur_eq_operator_error)
# obj_funs = np.array(flatten([func(self) for func in self.obj_funs]))
# np.array([self.evaluate(normalize = False),] + [eq.L0_norm for eq in self.structure])
moeadd.moeadd_solution.__init__(self, self.vals, self.obj_funs) # , return_val = True, self) super(
self.moeadd_set = True
def optimize_equation(self, pool, strategy, population_size, basic_terms : list = [],
separate_vars : set = None, EA_kwargs = dict()):
population = [Equation(pool, basic_terms, self.max_terms_number, self.max_factors_in_term)
for i in range(population_size)]
EA_kwargs['separate_vars'] = separate_vars
strategy.run(initial_population = population, EA_kwargs = EA_kwargs)
result = strategy.result
return result[0], result[0].evaluate(normalize = False, return_val=True)[0], result[0].evaluate(normalize = True, return_val=True)[0]
@staticmethod
def equation_opt_iteration(population, evol_operator, population_size, iter_index, separate_vars, strict_restrictions = True):
for equation in population:
if equation.described_variables in separate_vars:
equation.penalize_fitness(coeff = 0.)
population = Population_Sort(population)
population = population[:population_size]
gc.collect()
population = evol_operator.apply(population, separate_vars)
return population
def evaluate(self, normalize = True):
if len(self.structure) == 1:
value = self.structure[0].evaluate(normalize = normalize, return_val = True)[0]
else:
value = np.sum([equation.evaluate(normalize, return_val = True)[0] for equation in self.structure])
value = np.sum(np.abs(value))
return value
@property
def obj_fun(self):
# print('objective functions:', self.obj_funs)
# print('objective function values:', [func(self) for func in self.obj_funs], flatten([func(self) for func in self.obj_funs]))
return np.array(flatten([func(self) for func in self.obj_funs]))
def __call__(self):
assert self.moeadd_set, 'The structure of the equation is not defined, therefore no moeadd operations can be called'
return self.obj_fun
@property
def text_form(self):
form = ''
if len(self.structure) > 1:
for eq_idx, equation in enumerate(self.structure):
if eq_idx == 0:
form += '/ ' + equation.text_form + '\n'
elif eq_idx == len(self.structure) - 1:
form += '\ ' + equation.text_form + '\n'
else:
form += '| ' + equation.text_form + '\n'
else:
form += self.structure[0].text_form + '\n'
return form
def __eq__(self, other):
assert self.moeadd_set, 'The structure of the equation is not defined, therefore no moeadd operations can be called'
# eps = 1e-9
return (all([any([other_elem == self_elem for other_elem in other.structure]) for self_elem in self.structure]) and
all([any([other_elem == self_elem for self_elem in self.structure]) for other_elem in other.structure]) and
len(other.structure) == len(self.structure)) or all(np.isclose(self.obj_fun, other.obj_fun))
@property
def latex_form(self):
form = r"\begin{eqnarray*}"
for equation in self.structure:
form += equation.latex_form + r", \\ "
form += r"\end{eqnarray*}"
def __hash__(self):
return hash(tuple(self.vals))
| 2.03125
| 2
|
rhea/build/boards/xilinx/_anvyl.py
|
meetps/rhea
| 1
|
12777787
|
<gh_stars>1-10
#
# Copyright (c) 2015 <NAME>, <NAME>
#
from rhea.build import FPGA
from rhea.build.extintf import Port
from rhea.build.toolflow import ISE
class Anvyl(FPGA):
vendor = 'xilinx'
family = 'spartan6'
device = 'XC6SLX45'
package = 'CSG484'
speed = '-3'
_name = 'anvyl'
default_clocks = {
# 'clk' in documentation
'clock': dict(frequency=100e6, pins=('D11',),
iostandard='LVCMOS33')
}
default_ports = {
# on-board switches
'sw': dict(pins=('V5', 'U4', 'V3', 'P4',
'R4', 'P6', 'P5', 'P8',),
iostandard='LVCMOS18'),
# on-board leds
'led': dict(pins=('W3', 'Y4', 'Y1', 'Y3',
'AB4', 'W1', 'AB3', 'AA4'),
iostandard='LVCMOS18'),
# on-board push-buttons
'btn': dict(pins=('E6', 'D5', 'A3', 'AB9'),
iostandard='LVCMOS33')
}
def get_flow(self, top=None):
return ISE(brd=self, top=top)
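
# Illustrative usage sketch (not part of the original board file). It assumes a
# MyHDL-style top-level function `blinky` exists and that the ISE flow object
# returned by get_flow() exposes a `run()` method, as rhea's examples do elsewhere;
# only Anvyl() and get_flow() are taken from this file.
#
#     brd = Anvyl()
#     flow = brd.get_flow(top=blinky)
#     flow.run()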
| 2.09375
| 2
|
gridded/tests/test_variable.py
|
ajnisbet/gridded
| 0
|
12777788
|
<gh_stars>0
"""
tests of Variable object
Variable objects are mostly tested implicitly in other tests,
but good to have a few explicitly for the Variable object
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import netCDF4
from .utilities import get_test_file_dir
from gridded import Variable
test_dir = get_test_file_dir()
sample_sgrid_file = os.path.join(test_dir, 'staggered_sine_channel.nc')
def test_create_from_netcdf_dataset():
ds = netCDF4.Dataset(sample_sgrid_file)
var = Variable.from_netCDF(dataset=ds,
varname='u',
)
print(var.info)
assert var.data_file == 'staggered_sine_channel.nc'
assert var.data.shape == (5, 24)
| 2.34375
| 2
|
ultimatewebsite/members/admin.py
|
poshan0126/class-ultimate-classof2020
| 1
|
12777789
|
from django.contrib import admin
from members.models import Member
# Register your models here.
class MemberAdmin(admin.ModelAdmin):
'''
Admin View for Member
'''
list_display = ('full_name', 'email', 'phone_number',)
admin.site.register(Member, MemberAdmin)
| 2.125
| 2
|
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/_api/v1/keras/activations/__init__.py
|
JustinACoder/H22-GR3-UnrealAI
| 6
|
12777790
|
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Built-in activation functions.
"""
from __future__ import print_function
from tensorflow.python.keras.activations import deserialize
from tensorflow.python.keras.activations import elu
from tensorflow.python.keras.activations import get
from tensorflow.python.keras.activations import hard_sigmoid
from tensorflow.python.keras.activations import linear
from tensorflow.python.keras.activations import relu
from tensorflow.python.keras.activations import selu
from tensorflow.python.keras.activations import serialize
from tensorflow.python.keras.activations import sigmoid
from tensorflow.python.keras.activations import softmax
from tensorflow.python.keras.activations import softplus
from tensorflow.python.keras.activations import softsign
from tensorflow.python.keras.activations import tanh
del print_function
| 1.867188
| 2
|
src/extractors/tripadvisor/entity.py
|
jherrerotardon/spies
| 0
|
12777791
|
<reponame>jherrerotardon/spies
"""Crawler to extract reviews and data from an entity. """
from pyframework.exceptions.custom_exceptions import ArgumentException
from scrapy import Request
from ..crawler import Crawler
from ...models.restaurant import Restaurant
from ...models.review import Review
class Entity(Crawler):
"""Crawler to extract reviews and data from an entity. """
name = 'TripadvisorReviews'
_storage = 'reviews.pickle'
_entity_id = None
"""Entity ID where data will be scrapped. """
_entity_info = {}
"""Information from entity extracted from entity web. """
_review_obj = None
"""Reviews model instance to connect to DB. """
_data_cleaned = False
"""Flag to make sure that reviews are removed from DB before download news. """
def __init__(self, urls: list, **kwargs):
super(Entity, self).__init__(urls, **kwargs)
if 'entity_id' not in kwargs:
raise ArgumentException('entity_id is mandatory.')
self._entity_id = kwargs['entity_id']
self._review_obj = Review()
def parse(self, response, **kwargs):
super(Entity, self).parse(response)
if not self._data_cleaned:
self._clean_data_on_db()
self._data_cleaned = True
if not self._entity_info:
self._entity_info = self._processor.get_entity_info(response)
self._update_restaurant_info()
# Extract reviews from current page.
reviews = self._processor.get_reviews(response)
for review in reviews:
yield review
if next_page := self._processor.get_next_page_url(response):
yield Request(url=response.urljoin(next_page), callback=self.parse)
def _clean_data_on_db(self):
"""Remove reviews in BD from entity ID.
:return:
"""
self._review_obj.delete_many({
'entity_id': self._entity_id,
})
def _storage_items(self):
for review in self._items:
review['entity_id'] = self._entity_id
# Insert data on DB.
self._review_obj.insert_many(self._items)
self._items = []
def _update_restaurant_info(self):
"""Update info in DB of restaurant.
:return:
"""
Restaurant().update(self._entity_id, self._entity_info)
| 2.46875
| 2
|
eraserhead/request_storage.py
|
yozlet/django-eraserhead
| 216
|
12777792
|
# encoding: utf-8
from __future__ import print_function
import term
import humanfriendly
class RequestStorage(object):
""" Stores statistics about single request """
def __init__(self):
self.queryset_stats = []
def add_queryset_storage_instance(self, queryset_storage):
self.queryset_stats.append(queryset_storage)
@property
def total_wasted_memory(self):
wasted_memory = 0
for qs_storage in self.queryset_stats:
wasted_memory += qs_storage.total_wasted_memory
return wasted_memory
# Stats print methods
def print_stats(self):
""" Display statistics of current request """
if not self.queryset_stats:
return
term.writeLine("\n\t ERASERHEAD STATS \n", term.bold, term.reverse)
for queryset_storage in self.queryset_stats:
queryset_storage.print_stats()
print()
term.write("\t TOTAL WASTED MEMORY: ", term.bold, term.reverse)
term.write(" {}".format(humanfriendly.format_size(self.total_wasted_memory)), term.red)
print()
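
# Illustrative sketch (FakeQuerySetStorage below is a hypothetical stand-in; it only
# provides the two members RequestStorage relies on: `total_wasted_memory` and
# `print_stats()`. Requires the `term` and `humanfriendly` packages imported above.)
if __name__ == "__main__":
    class FakeQuerySetStorage(object):
        total_wasted_memory = 2048

        def print_stats(self):
            print("queryset: 2.05 KB wasted")

    storage = RequestStorage()
    storage.add_queryset_storage_instance(FakeQuerySetStorage())
    storage.print_stats()  # prints the per-queryset stats and the ~2 KiB total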
| 2.875
| 3
|
multitask_lightning/metrics/utils.py
|
heyoh-app/gestures-detector
| 8
|
12777793
|
<filename>multitask_lightning/metrics/utils.py
import torch
import numpy as np
import sys
sys.path.append("../")
from utils.train_utils import nms
def postprocess(tensor, tensor_type, threshold=None):
with torch.no_grad():
if tensor_type == "kpoint":
threshold = torch.nn.Threshold(threshold, 0)
tensor = nms(threshold(torch.sigmoid(tensor)))
elif tensor_type == "size":
tensor = torch.sigmoid(tensor)
return tensor
def masks_to_bboxes(masks, num_classes, max_bbox_per_img: int, threshold: float, out_size: int, is_predict: bool = False):
kpoint, side, size = torch.split(masks, [num_classes, 1, 1], 1) if is_predict else masks
if is_predict:
kpoint = postprocess(kpoint, tensor_type="kpoint", threshold=threshold)
size = postprocess(size, tensor_type="size")
else:
kpoint = torch.eq(kpoint, 1.).float()
binary_mask = torch.gt(kpoint, 0)
coords = binary_mask.nonzero().cpu().detach().numpy() # get list of object center coords [[B,C,H,W], ...]
probs = kpoint.masked_select(binary_mask).cpu().detach().numpy() # get list of probabilities of each object
heights = (binary_mask * size).masked_select(binary_mask).cpu().detach().numpy() # get list of heights of each object
# [x top-left, y top-left, x bottom-right, y bottom-right, class, probability, img idx in batch]
bboxes = np.zeros((coords.shape[0], 7))
bboxes[:, 0] = coords[:, 3] - 0.5 * heights * out_size
bboxes[:, 1] = coords[:, 2] - 0.5 * heights * out_size
bboxes[:, 2] = coords[:, 3] + 0.5 * heights * out_size
bboxes[:, 3] = coords[:, 2] + 0.5 * heights * out_size
bboxes[:, 4] = coords[:, 1]
bboxes[:, 5] = probs
bboxes[:, 6] = coords[:, 0]
bboxes_batch_list = []
for b in range(kpoint.shape[0]): # batch size
bboxes_batch = bboxes[bboxes[:, -1] == b][:, :-1]
if is_predict: # filter top k boxes
bboxes_batch = bboxes_batch[bboxes_batch[:, -1].argsort()][::-1]
bboxes_batch = bboxes_batch[:max_bbox_per_img]
else:
# [x top-left, y top-left, x bottom-right, y bottom-right, class, difficult, crowd]
bboxes_batch = np.hstack([
bboxes_batch[:, :5],
np.zeros((bboxes_batch.shape[0], 2))
])
bboxes_batch_list.append(bboxes_batch)
return bboxes_batch_list
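
# Illustrative usage sketch (assumes torch/numpy are installed and the repo's
# utils.train_utils is importable, as required by the imports above). A single
# ground-truth keypoint of class 0 at (y=4, x=4) with relative height 0.25 is
# converted to one bounding box on a 64-pixel output grid.
if __name__ == "__main__":
    B, C, H, W = 1, 2, 8, 8
    kpoint = torch.zeros(B, C, H, W)
    side = torch.zeros(B, 1, H, W)
    size = torch.zeros(B, 1, H, W)
    kpoint[0, 0, 4, 4] = 1.
    size[0, 0, 4, 4] = 0.25
    boxes = masks_to_bboxes((kpoint, side, size), num_classes=C,
                            max_bbox_per_img=10, threshold=0.3, out_size=64)
    # one row per object: [x1, y1, x2, y2, class, difficult, crowd]
    print(boxes[0])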
| 2.203125
| 2
|
d4data/repos.py
|
kforti/D4Data
| 2
|
12777794
|
<gh_stars>1-10
class Repo:
def __init__(self, data_access, serializer, index_key=None):
self.data_access = data_access
self.serializer = serializer
self.index_key = index_key
def get(self, index):
data = self.data_access.read(index)
obj = self.serializer.deserialize(data)
return obj
def add(self, obj, index=None):
data = self.serializer.serialize(obj)
self.data_access(index, data)
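
# Illustrative usage sketch (DictAccess and JsonSerializer below are hypothetical
# stand-ins, not part of this module; they mirror the interface Repo relies on:
# a callable write path, a `.read()` method, and serialize/deserialize).
if __name__ == "__main__":
    import json

    class DictAccess(object):
        def __init__(self):
            self.store = {}

        def __call__(self, index, data):  # write path used by Repo.add
            self.store[index] = data

        def read(self, index):  # read path used by Repo.get
            return self.store[index]

    class JsonSerializer(object):
        def serialize(self, obj):
            return json.dumps(obj)

        def deserialize(self, data):
            return json.loads(data)

    repo = Repo(DictAccess(), JsonSerializer())
    repo.add({"name": "d4data"}, index="item-1")
    print(repo.get("item-1"))  # -> {'name': 'd4data'}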
| 2.5
| 2
|
saved_exp_results/SHHA-VGG16/SHHA.py
|
Linfeng-Lee/IIM
| 81
|
12777795
|
from easydict import EasyDict as edict
# init
__C_SHHA = edict()
cfg_data = __C_SHHA
__C_SHHA.TRAIN_SIZE = (512,1024)
__C_SHHA.DATA_PATH = '../ProcessedData/SHHA/'
__C_SHHA.TRAIN_LST = 'train.txt'
__C_SHHA.VAL_LST = 'val.txt'
__C_SHHA.VAL4EVAL = 'val_gt_loc.txt'
__C_SHHA.MEAN_STD = ([0.410824894905, 0.370634973049, 0.359682112932],
[0.278580576181, 0.26925137639, 0.27156367898])
__C_SHHA.LABEL_FACTOR = 1
__C_SHHA.LOG_PARA = 1.
__C_SHHA.RESUME_MODEL = ''#model path
__C_SHHA.TRAIN_BATCH_SIZE = 6 #imgs
__C_SHHA.VAL_BATCH_SIZE = 1 # must be 1
| 1.59375
| 2
|
src/server/ipc.py
|
gkovacs81/argus_server
| 0
|
12777796
|
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Date: 2021-02-25 20:06:08
# @Last Modified by: <NAME>
# @Last Modified time: 2021-02-25 20:06:12
import json
import logging
import socket
from os import environ
from monitoring.constants import (
ARM_AWAY,
ARM_STAY,
LOG_IPC,
MONITOR_ARM_AWAY,
MONITOR_ARM_STAY,
MONITOR_DISARM,
MONITOR_REGISTER_CARD,
POWER_GET_STATE,
MONITOR_SET_CLOCK,
MONITOR_SYNC_CLOCK,
MONITOR_UPDATE_CONFIG,
UPDATE_SECURE_CONNECTION,
MONITOR_UPDATE_KEYPAD,
MONITOR_GET_STATE,
MONITOR_GET_ARM,
UPDATE_SSH,
)
class IPCClient(object):
"""
Sending IPC messages from the REST API to the monitoring service
"""
_socket = None
def __init__(self):
self._logger = logging.getLogger(LOG_IPC)
if not self._socket:
self._socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
self._socket.connect(environ["MONITOR_INPUT_SOCKET"])
except (ConnectionRefusedError, FileNotFoundError):
self._socket = None
def disarm(self, user_id):
return self._send_message({"action": MONITOR_DISARM, "user_id": user_id})
def get_arm(self):
return self._send_message({"action": MONITOR_GET_ARM})
def arm(self, arm_type, user_id):
if arm_type == ARM_AWAY:
return self._send_message({"action": MONITOR_ARM_AWAY, "user_id": user_id})
elif arm_type == ARM_STAY:
return self._send_message({"action": MONITOR_ARM_STAY, "user_id": user_id})
else:
print("Unknown arm type: %s" % arm_type)
def get_state(self):
return self._send_message({"action": MONITOR_GET_STATE})
def get_power_state(self):
return self._send_message({"action": POWER_GET_STATE})
def update_configuration(self):
return self._send_message({"action": MONITOR_UPDATE_CONFIG})
def update_keypad(self):
return self._send_message({"action": MONITOR_UPDATE_KEYPAD})
def register_card(self):
return self._send_message({"action": MONITOR_REGISTER_CARD})
def update_dyndns(self):
return self._send_message({"action": UPDATE_SECURE_CONNECTION})
def update_ssh(self):
return self._send_message({"action": UPDATE_SSH})
def sync_clock(self):
return self._send_message({"action": MONITOR_SYNC_CLOCK})
def set_clock(self, settings):
message = {"action": MONITOR_SET_CLOCK}
message = {**message, **settings}
return self._send_message(message)
def _send_message(self, message):
if self._socket:
self._socket.send(json.dumps(message).encode())
data = self._socket.recv(1024)
return json.loads(data.decode())
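
# Illustrative usage sketch (assumes the monitoring service is listening on the UNIX
# socket named by the MONITOR_INPUT_SOCKET environment variable; if it is not, the
# constructor leaves _socket as None and _send_message returns None):
#
#     client = IPCClient()
#     print(client.get_state())
#     client.arm(ARM_AWAY, user_id=1)
#     client.disarm(user_id=1)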
| 2.265625
| 2
|
vocab/urls.py
|
DiegoVilela/news-review-vocab
| 0
|
12777797
|
<gh_stars>0
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('<slug:slug>/', views.entry_detail, name='entry_detail'),
path('episode/<slug:slug>/', views.episode_detail, name='episode_detail'),
]
| 1.578125
| 2
|
extract_features.py
|
respeecher/vae_workshop
| 5
|
12777798
|
<filename>extract_features.py
import numpy as np
import librosa
import os
import random
import re
import sys
import argparse
from scipy import signal
from utils.audio_utils import preemphasis
EPS = 1e-10
description = (
"""Extract spectral features from audio files. The script will search
for audio files and compute spectral features for each file (unless
filters are specified) and preserve the same folder structure as in the
audio directory.""")
parser = argparse.ArgumentParser(description=description)
parser.add_argument('audio_dir', type=str,
help='Directory with audio files')
parser.add_argument('target_dir', type=str,
help='Place to save feature files')
parser.add_argument('--dry_run', action='store_true',
help="Just find the files and don't write anything")
parser.add_argument('--filter_speakers', type=lambda s: s.split(','),
help='Comma-separated list of speakers to filter '
'filenames',
default='')
parser.add_argument('--sample_rate', type=int,
help='The input audio will be interpolated to this sample '
'rate.',
default=16000)
parser.add_argument('--hop_size', type=int,
help='Hop size for the STFT algorithm (in samples)',
default=160)
parser.add_argument('--n_fft', type=int,
help='Length of the window for the STFT algorithm (in '
'samples). The number of the resulting FFT '
'coefficients will be equal to (n_fft / 2) - 1.',
default=1024)
parser.add_argument('--n_filterbank', type=int,
help='Size of the mel filterbank.',
default=80)
parser.add_argument('--preemphasis', type=float,
help='Preemphasis coefficient.',
default=0.97)
parser.add_argument('--clip_before_log', type=float,
help='Clip filterbank powers to this value from below, '
'before taking logarithms.',
default=0.0001)
parser.add_argument('--trim_silence_db', type=float,
help='The threshold (in decibels) below reference to '
'consider as silence and trim it.',
default=20)
args = parser.parse_args()
def get_features(
fname,
sample_rate,
n_fft,
n_mel,
hop_size,
preemph,
clip_value,
trim_silence_db):
au, _ = librosa.load(fname, sample_rate, dtype=np.float64)
au, _ = librosa.effects.trim(au, trim_silence_db, np.max, 256, 128)
au = librosa.util.normalize(au)
if preemph:
au = preemphasis(au, preemph)
spec = librosa.stft(
au,
n_fft=n_fft,
hop_length=hop_size)
spec = np.abs(spec.T)**2
mel_filters = librosa.filters.mel(sample_rate, n_fft, n_mel,
fmin=125, fmax=7600)
mel_spec = np.dot(mel_filters, spec.T).T
mel_spec = np.clip(mel_spec, clip_value, None)
spec = np.clip(spec, clip_value, None)
return [arr.astype(np.float32) for arr in [au, spec, mel_spec]]
if args.dry_run:
print("WARNING: working in dry-run mode")
fnames = []
for root, dirs, files in os.walk(args.audio_dir, followlinks=True):
for f in files:
full_path = os.path.join(root, f)
passes = bool(re.search(r'\.(ogg|wav)$', f))
passes = passes and any(
[name in full_path for name in args.filter_speakers])
if passes:
fnames.append(os.path.join(root, f))
print("Found {} files to process".format(len(fnames)))
if os.path.exists(args.target_dir):
raise OSError(
"the target directory '{}' already exists!".format(args.target_dir))
if not args.dry_run:
os.makedirs(args.target_dir)
n_files = len(fnames)
for i, fname in enumerate(fnames):
au, spec, mel_spec = get_features(fname,
args.sample_rate,
args.n_fft,
args.n_filterbank,
args.hop_size,
args.preemphasis,
args.clip_before_log,
args.trim_silence_db)
feat_file = (re.sub(r'\.(ogg|wav)$', '.feat', fname)
.replace(os.path.normpath(args.audio_dir),
os.path.normpath(args.target_dir)))
progress = i / n_files * 100
print('{} -> {} [{:.1f}%]\r'.format(
fname, feat_file, progress), end='', flush=True)
if not args.dry_run:
os.makedirs(os.path.dirname(feat_file), exist_ok=True)
np.savez(feat_file,
au=au,
logspec=np.log(spec + EPS),
logmel=np.log(mel_spec + EPS))
print('\ndone')
| 3.25
| 3
|
custom/icds_reports/management/commands/run_custom_data_pull.py
|
satyaakam/commcare-hq
| 1
|
12777799
|
<reponame>satyaakam/commcare-hq
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from custom.icds_reports.data_pull.exporter import DataExporter
class Command(BaseCommand):
help = """
Dump data from a pre-defined custom query for ICDS data pull requests
or a sql file to generate a result zip file in the current directory.
The command returns the zip file name.
"""
def add_arguments(self, parser):
parser.add_argument('name', help="slug of a custom data pull or a sql file name/path")
parser.add_argument('db_alias', choices=settings.DATABASES)
parser.add_argument('--month', help="format YYYY-MM-DD")
parser.add_argument('--location_id')
parser.add_argument('-s', '--skip_confirmation', action='store_true')
parser.add_argument('-l', '--log_progress', action='store_true')
def handle(self, name, db_alias, *arg, **options):
if db_alias not in settings.DATABASES:
raise CommandError("Unexpected db alias")
month = options.get('month')
location_id = options.get('location_id')
skip_confirmation = options.get('skip_confirmation')
log_progress = options.get('log_progress')
exporter = DataExporter(name, db_alias, month, location_id)
if log_progress:
self._log(exporter.queries)
if skip_confirmation or self._get_confirmation():
return exporter.export()
def _log(self, queries):
print("Running queries now")
for sql in queries:
print(sql)
def _get_confirmation(self):
proceed = input("Continue?(YES)")
return proceed == "YES"
| 2.109375
| 2
|
src/sagesaver/mongo.py
|
Cozieee/sagesaver
| 0
|
12777800
|
<filename>src/sagesaver/mongo.py
import json
from datetime import datetime
def is_user_collection(name: str):
name_arr = name.split('.')
return (len(name_arr) > 1
and name_arr[0] not in ['admin', 'local', 'config'])
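
# Worked examples for the filter above (collection names as reported by MongoDB's
# `top` command, i.e. "<database>.<collection>"):
#   is_user_collection("mydb.orders")        -> True
#   is_user_collection("admin.system.users") -> False (system database)
#   is_user_collection("standalone")         -> False (no database prefix)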
class Mongo:
def __init__(self, client, cache):
self.client = client
self.cache = cache
def get_user_totals(self):
totals = self.client().admin.command('top')['totals']
user_totals = {k: v for k,
v in totals.items() if is_user_collection(k)}
return user_totals
def last_active(self):
new_totals = self.get_user_totals()
new_dump = {
"timestamp": datetime.now().timestamp(),
"totals": new_totals
}
new_activity = True
try:
old_dump = self.cache.read()
new_activity = (json.dumps(old_dump['totals']) !=
json.dumps(new_totals))
except KeyError:
return datetime.now()
        finally:
            if new_activity:
                self.cache.write(new_dump)
        # moved out of ``finally`` so the early return in ``except`` is not overridden
        return datetime.fromtimestamp(old_dump['timestamp'])
| 2.328125
| 2
|
urchin/fs/plugin.py
|
kellen/urchinfs
| 2
|
12777801
|
<filename>urchin/fs/plugin.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
class Plugin(object):
"""Plugin encapsulates a set of processing classes"""
def __init__(self, indexer, matcher, extractor, merger, munger, formatter):
self.indexer = indexer
self.matcher = matcher
self.extractor = extractor
self.merger = merger
self.munger = munger
self.formatter = formatter
class Indexer(object):
"""Finds items to be indexed"""
component = "indexer"
def __init__(self, config):
pass
def index(self, path):
"""Return a list of paths to be indexed"""
raise NotImplementedError()
class MetadataMatcher(object):
"""For a given item path, matches paths from which metadata should be extracted"""
component = "matcher"
def __init__(self, config):
pass
def match(self, path):
"""Return a list of paths from which to extract metadata"""
raise NotImplementedError()
class MetadataExtractor(object):
"""Metadata extractor"""
component = "extractor"
def __init__(self, config):
pass
def extract(self, path):
"""
Takes a file path and returns a dict with string keys and sets of strings as values.
metadata[key] = set(value1, value2)
"""
raise NotImplementedError()
class MetadataMunger(object):
"""
Metadata munger
Cleans up metadata by manipulating/removing values or adding/removing structure
"""
component = "munger"
def __init__(self, config):
pass
def mung(self, metadata):
raise NotImplementedError()
class MetadataMerger(object):
"""Merges metadata when multiple sources are involved"""
component = "merger"
def __init__(self, config):
pass
def merge(self, metadata):
"""
Merges `metadata`
`metadata` is a dict of: source -> dict of metadata
each dict of metadata has string keys and sets of strings of values.
return a single single dict with string keys and sets of strings as values.
"""
raise NotImplementedError()
class Formatter(object):
"""Formatter for display names."""
component = "formatter"
def __init__(self, config):
pass
def format(self, original_name, metadata):
"""Takes the original file/directory name and associated metadata and returns one or more formatted "display names" """
raise NotImplementedError()
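
# A minimal sketch of how these components are meant to compose (the Default* classes
# below are hypothetical placeholders; urchinfs ships concrete implementations in its
# own plugin modules):
#
#     plugin = Plugin(
#         indexer=DefaultIndexer(config),
#         matcher=DefaultMatcher(config),
#         extractor=DefaultExtractor(config),
#         merger=DefaultMerger(config),
#         munger=DefaultMunger(config),
#         formatter=DefaultFormatter(config),
#     )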
| 2.578125
| 3
|
qtt/algorithms/bias_triangles.py
|
dpfranke/qtt
| 0
|
12777802
|
""" Functionality to analyse bias triangles
@author: amjzwerver
"""
#%%
import numpy as np
import qcodes
import qtt
import qtt.pgeometry
import matplotlib.pyplot as plt
from qcodes.plots.qcmatplotlib import MatPlot
from qtt.data import diffDataset
def plotAnalysedLines(clicked_pts, linePoints1_2, linePt3_vert, linePt3_horz, linePt3_ints, intersect_point):
""" Plots lines based on three points clicked
Args:
clicked_pts (array): lines between the three points (1-2), (2-3)
linePoints1_2 (array): line fitted through points 1 and 2
linePt3_vert (array): vertical line through point 3
linePt3_horz (array): horizontal line through point 3
linePt3_ints (array): line through point 3 and its vert/horz intersection
with the line through point 1,2
intersect_point (array): intersection point point 3, line1_2
"""
qtt.pgeometry.plot2Dline(linePoints1_2, ':c', alpha = .5)
qtt.pgeometry.plot2Dline(linePt3_vert, ':b', alpha=.4)
qtt.pgeometry.plot2Dline(linePt3_horz, ':b', alpha=.4)
qtt.pgeometry.plot2Dline(linePt3_ints, ':b', alpha=.4)
qtt.pgeometry.plotPoints(intersect_point, '.b')
qtt.pgeometry.plotPoints(clicked_pts[:,2:3], '.b')
linePt3_ints_short = np.column_stack((intersect_point, clicked_pts[:,2:3]))
qtt.pgeometry.plotPoints(linePt3_ints_short, 'b')
def perpLineIntersect(ds, description, vertical = True, points=None):
""" Takes three points in a graph and calculates the length of a linepiece
between a line through points 1,2 and a vertical/horizontal line
through the third point. Uses the currently active figure.
Args:
ds (dataset): dataset with charge stability diagram and gate voltage in mV
vertical (bool): find intersection of point with line vertically (True)
or horizontally (False)
description:
Returns:
        (dict): 'intersection_point' = intersection point
'distance' = length of line from 3rd clicked point to line
through clicked points 1 and 2
'clicked_points' = coordinates of the three clicked points
"""
diffDataset(ds, diff_dir='xy')
plt.figure(588); plt.clf()
MatPlot(ds.diff_dir_xy, num = 588)
ax = plt.gca()
ax.set_autoscale_on(False)
ax.set_xlabel(ax.get_xlabel()[:2])
ax.set_ylabel(ax.get_ylabel()[:2])
# ax = plt.gca()
# ax.set_autoscale_on(False)
if description == 'lever_arm' and vertical == True:
print('''Please click three points;
Point 1: on the addition line for the dot represented on the vertical axis
Point 2: further on the addition line for the dot represented on the vertical axis
Point 3: on the triple point at the addition line for the dot represented on the horizontal axis
where both dot levels are aligned''')
elif description == 'lever_arm' and vertical == False:
print('''Please click three points;
Point 1: on the addition line for the dot represented on the horizontal axis
Point 2: further on the addition line for the dot represented on the horizontal axis
Point 3: on the triple point at the addition line for the dot represented on the horizontal axis
where both dot levels are aligned''')
elif description == 'E_charging':
print('''Please click three points;
Point 1: on the (0, 1) - (0,2) addition line
Point 2: further on the (0, 1) - (0,2) addition line
Point 3: on the (0, 0) - (0, 1) addition line ''')
else:
# Do something here such that no three points need to be clicked
        print('''Please make sure that the description argument of this function
is either 'lever_arm' or 'E_charging' ''')
if points is not None:
clicked_pts = points
else:
clicked_pts=qtt.pgeometry.ginput(3, '.c')
qtt.pgeometry.plotPoints(clicked_pts, ':c')
qtt.pgeometry.plotLabels(clicked_pts)
linePoints1_2 = qtt.pgeometry.fitPlane( clicked_pts[:, 0:2].T )
yy = clicked_pts[:,[2, 2]]; yy[1, -1] += 1
line_vertical = qtt.pgeometry.fitPlane( yy.T )
xx = clicked_pts[:,[2, 2]]; xx[0, -1] += 1
line_horizontal = qtt.pgeometry.fitPlane( xx.T )
if vertical == True:
i = qtt.pgeometry.intersect2lines(linePoints1_2, line_vertical)
intersectPoint = qtt.pgeometry.dehom(i)
line = intersectPoint[:,[0,0]]; line[0,-1]+=1
else:
i = qtt.pgeometry.intersect2lines(linePoints1_2, line_horizontal)
intersectPoint = qtt.pgeometry.dehom(i)
line = intersectPoint[:,[0,0]]; line[1,-1]+=1
linePt3_ints = qtt.pgeometry.fitPlane(line.T)
line_length = np.linalg.norm(intersectPoint - clicked_pts[:,2:3])
# visualize
plotAnalysedLines(clicked_pts, linePoints1_2, line_vertical, line_horizontal, linePt3_ints, intersectPoint)
return {'intersection_point': intersectPoint, 'distance': line_length, 'clicked_points': clicked_pts}
#def intersect2lines(l1, l2):
# """ Caculate intersection between 2 lines """
# r = qtt.pgeometry.null(np.vstack( (l1, l2)) )
# a = qtt.pgeometry.dehom(r[1])
# return a
def lever_arm(bias, results, fig = None):
""" Calculates the lever arm of a dot by using bias triangles in charge sensing. Uses currently active figure.
Args:
bias (float): bias in uV between source and drain while taking the bias triangles
results (dict): dictionary returned from the function perpLineIntersect
containing three points, the intersection point
between a line through 1,2 and the third point and the
length from points 3 to the intersection (horz/vert)
fig (bool): adds lever arm to title of already existing figure with points
Returns:
lev_arm (float): the lever arm of the assigned dot in uV/mV
"""
line_length = results['distance']
#in uV/mV
lev_arm = abs(bias/line_length)
if fig and len(plt.get_fignums()) != 0:
ax = plt.gca()
ax.set_autoscale_on(False)
if np.round(results['clicked_points'][0,2],2) == np.round(results['intersection_point'][0],2):
gate = ax.get_ylabel()[:2]
else:
gate = ax.get_xlabel()[:2]
title = 'Lever arm %s: %.2f $\mu$eV/mV'%(gate, lev_arm)
plt.annotate('Length %s: %.2f mV'%(gate, line_length), xy = (0.05, 0.1), xycoords='axes fraction', color = 'k')
plt.annotate(title, xy = (0.05, 0.05), xycoords='axes fraction', color = 'k')
ax.set_title(title)
return lev_arm
def E_charging(lev_arm, results, fig = None):
"""
Calculates the charging energy of a dot by using charge stability diagrams.
Uses currently active figure.
Args:
lev_arm (float): lever arm for the gate to the dot
results (dict): dictionary returned from the function perpLineIntersect
containing three points, the intersection point
between a line through 1,2 and the third point and the
length from points 3 to the intersection (horz/vert)
fig (bool): adds charging energy to title of already existing figure with points
Returns:
E_charging (float): the charging energy for the dot
"""
line_length = results['distance']
E_c = line_length * lev_arm
if fig and len(plt.get_fignums()) != 0:
ax = plt.gca()
ax.set_autoscale_on(False)
if np.round(results['clicked_points'][0,2],2) == np.round(results['intersection_point'][0],2):
gate = ax.get_ylabel()[:2]
else:
gate = ax.get_xlabel()[:2]
title = 'E_charging %s: %.2f meV'%(gate, E_c/1000)
plt.annotate('Length %s: %.2f mV'%(gate, line_length), xy = (0.05, 0.1), xycoords='axes fraction', color = 'k')
plt.annotate(title, xy = (0.05, 0.05), xycoords='axes fraction', color = 'k')
ax.set_title(title)
return E_c
def test_lever_arm():
lever_arm_fit = {'clicked_points': np.array([[ 24., 38., 40.], [135., 128., 111.]]), 'distance': 15., 'intersection_point': np.array([[ 40.4],[127.]])}
r=lever_arm(-800, lever_arm_fit)
assert(np.abs(r-53.3)<1e-1)
| 2.9375
| 3
|
sculpture/models/base_image.py
|
kingsdigitallab/crsbi-django
| 1
|
12777803
|
from django.db import models
import iipimage.fields
import iipimage.storage
import sculpture.constants
from .base_model import BaseModel
from .contributor import Contributor
from .image_status import ImageStatus
class BaseImage (BaseModel):
"""Abstract model for all images."""
SOURCE_FORMATS = (('analogue', 'Analogue'), ('digital', 'Digital'))
image = iipimage.fields.ImageField(
height_field='height', width_field='width',
storage=iipimage.storage.image_storage,
upload_to=iipimage.storage.get_image_path,
help_text='Accepts RAW, TIFF and JPEG files',
blank=True,
null=True)
status = models.ForeignKey(
ImageStatus, related_name='%(app_label)s_%(class)s_images', blank=True)
photographer = models.ForeignKey(
Contributor, blank=True, null=True,
related_name='%(app_label)s_%(class)s_images')
caption = models.CharField(blank=True, max_length=256)
description = models.TextField(blank=True)
source_format = models.CharField(blank=True, choices=SOURCE_FORMATS,
max_length=12)
upload_file_format = models.CharField(blank=True, max_length=10)
upload_filename = models.CharField(blank=True, max_length=128)
resolution = models.IntegerField(blank=True, help_text='Pixels per inch.',
null=True)
# width and height are automatically set (via height_field and
# width_field on self.image). The information may still be useful
# to see, so do not set editable=False.
width = models.IntegerField(help_text='Width in pixels.', blank=True)
height = models.IntegerField(help_text='Height in pixels.', blank=True)
bit_depth = models.CharField(blank=True, max_length=12)
colour_mode = models.CharField(blank=True, max_length=12)
camera_details = models.CharField(blank=True, max_length=256)
photo_date = models.CharField(
blank=True, help_text='Date the photo was taken (eg: 01 Jan 2013).',
max_length=32)
editing_software = models.CharField(blank=True, max_length=128)
editing_notes = models.TextField(blank=True)
order = models.IntegerField(default=2)
copyright = models.TextField(blank=True, verbose_name="Copyright Information")
class Meta:
abstract = True
app_label = 'sculpture'
ordering = ['order']
def __str__ (self):
return self.caption
def save (self, *args, **kwargs):
# Automatically populate various metadata fields based on the
# image file.
if not self.id:
# Set a status for the image.
self.status = ImageStatus.objects.get(
name=sculpture.constants.IMAGE_STATUS_GOOD)
super(BaseImage, self).save(*args, **kwargs)
def get_metadata (self, site):
"""Returns a dictionary of metadata for this image."""
metadata = {'filename': self.image.name, 'caption': self.caption,
'date': self.photo_date, 'visit date': site.visit_date}
contributors = '; '.join([author.name for author in site.authors.all()])
metadata['fieldworkers'] = contributors
if self.photographer:
metadata['photographer'] = self.photographer.name
for field, value in metadata.items():
if value is not None:
metadata[field] = value.encode('utf-8')
return metadata
# Linked thumbnail for use in the admin.
# Adapted from http://djangosnippets.org/snippets/162/
def linked_thumbnail (self):
"""Displays thumbnail-size image linked to the full image."""
html = ''
if self.id:
html = '<a href="%s">%s</a>' % (self.image.url, self.thumbnail(70))
return html
linked_thumbnail.allow_tags = True
def thumbnail (self, height=500):
"""Displays thumbnail-size image."""
html = ''
if self.id:
image = self.image
thumbnail_url = image.thumbnail_url(height=height)
            html = '<div style="overflow: hidden;"><img height="%d" src="%s"></div>' % (height, thumbnail_url)
return html
thumbnail.allow_tags = True
# Linked thumbnail for use in the admin.
# Adapted from http://djangosnippets.org/snippets/162/
def thumbnail_site (self):
"""Displays thumbnail-size image linked to the full image."""
html = ''
if self.id:
html = '<a href="%s" title="Photograph by: %s">%s</a>' % (self.image.url, self.photographer, self.thumbnail(640))
return html
| 1.984375
| 2
|
general_reports/apps.py
|
hisham2k9/IMS-and-CAPA
| 0
|
12777804
|
from django.apps import AppConfig
class GeneralReportsConfig(AppConfig):
name = 'general_reports'
| 1.078125
| 1
|
doc/source/addons/modstubs.py
|
apodemus/pysalt3
| 0
|
12777805
|
<gh_stars>0
################################# LICENSE ##################################
# Copyright (c) 2009, South African Astronomical Observatory (SAAO) #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer #
# in the documentation and/or other materials provided with the #
# distribution. #
# * Neither the name of the South African Astronomical Observatory #
# (SAAO) nor the names of its contributors may be used to endorse #
# or promote products derived from this software without specific #
# prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE SAAO ''AS IS'' AND ANY EXPRESS OR #
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE #
# DISCLAIMED. IN NO EVENT SHALL THE SAAO BE LIABLE FOR ANY #
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL #
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS #
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
############################################################################
# Ensure python 2.5 compatibility
import os
"""Autogenerate module documentation in *modules* directory for all modules listed in *modules.lst*"""
def autogen(list,output_path):
"""
Automatically generates module documentation files for modules in list file.
*list* filename of module list.
*output_path* output module documentation goes in this directory.
"""
print('Autogenerating module documentation for modules in',list)
# Read modules from list file
with open(list) as f:
        modules=[m.strip() for m in f.readlines() if m.strip() and m.strip()[0]!='#']
for m in modules:
# Try importing the module this is needed for sphinx to function correctly
try:
exec('import %s' % m)
exec('del %s' % m)
except:
            print('Module',m,'cannot be imported; no documentation will be generated.')
continue
# Check if documentation file exists
if os.path.isfile(output_path.rstrip('/')+'/'+m+'.rst'):
print('Module documentation for',m,'exists, skipping.')
continue
# Write empty configuration file
with open(output_path.rstrip('/')+'/'+m+'.rst','w') as f:
f.write('*'*len(m)+'\n')
f.write(m+'\n')
f.write('*'*len(m)+'\n')
f.write('\n')
f.write('.. automodule:: '+m+'\n')
f.write(' :members:\n')
f.write(' :undoc-members:\n')
f.write(' :show-inheritance:\n')
f.write('\n')
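
# Illustrative invocation (hypothetical paths; a Sphinx conf.py would normally call
# this during the documentation build):
#
#     autogen('modules.lst', 'source/modules')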
| 0.902344
| 1
|
setup.py
|
rvega/isobar
| 241
|
12777806
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='isobar',
version='0.1.1',
description='A Python library to express and manipulate musical patterns',
long_description = open("README.md", "r").read(),
long_description_content_type = "text/markdown",
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/ideoforms/isobar',
packages=find_packages(),
install_requires=['python-osc', 'mido', 'python-rtmidi'],
keywords=['sound', 'music', 'composition'],
classifiers=[
'Topic :: Multimedia :: Sound/Audio',
'Topic :: Artistic Software',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers'
],
setup_requires=['pytest-runner'],
tests_require=['pytest', 'pytest-timeout']
)
| 1.179688
| 1
|
problems/shortestPath.py
|
lnogueir/swe-interview-prep
| 0
|
12777807
|
<filename>problems/shortestPath.py
'''
Prompt:
Given a graph, a source node v1, and a destination node v2,
find the shortest path between node v1 and v2.
'''
def minimumPath(graph, v1, v2):
distances = [float('inf') for _ in range(len(graph))]
distances[v1] = 0
visited = [False for _ in range(len(graph))]
queue = [v1]
while len(queue) > 0:
v = queue.pop(0)
for neighbour in graph[v]:
distances[neighbour] = min(distances[neighbour], distances[v] + 1)
if not visited[neighbour]:
queue.append(neighbour)
visited[v] = True
return distances[v2]
graph1 = [
[1, 3],
[0,3,2],
[1],
[0, 1]
]
graph2 = [
[1,2],
[2,3],
[1,3],
[4],
[]
]
print(minimumPath(graph1, 0, 3))
print(minimumPath(graph2, 0, 4))
| 4.03125
| 4
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/common/lib/xmodule/xmodule/modulestore/__init__.py
|
osoco/better-ways-of-thinking-about-software
| 3
|
12777808
|
"""
This module provides an abstraction for working with XModuleDescriptors
that are stored in a database and accessible using their Location as an identifier
"""
import datetime
import logging
import re
import threading
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from contextlib import contextmanager
from operator import itemgetter
from opaque_keys.edx.keys import AssetKey, CourseKey
from opaque_keys.edx.locations import Location # For import backwards compatibility
from pytz import UTC
from sortedcontainers import SortedKeyList
from xblock.core import XBlock
from xblock.plugin import default_select
from xblock.runtime import Mixologist
# The below import is not used within this module, but it is still needed because
# other modules are importing EdxJSONEncoder from here
from openedx.core.lib.json_utils import EdxJSONEncoder
from xmodule.assetstore import AssetMetadata
from xmodule.errortracker import make_error_tracker
from .exceptions import InsufficientSpecificationError, InvalidLocationError
log = logging.getLogger('edx.modulestore')
LIBRARY_ROOT = 'library.xml'
COURSE_ROOT = 'course.xml'
# List of names of computed fields on xmodules that are of type usage keys.
# This list can be used to determine which fields need to be stripped of
# extraneous usage key data when entering/exiting modulestores.
XMODULE_FIELDS_WITH_USAGE_KEYS = ['location', 'parent']
class ModuleStoreEnum:
"""
A class to encapsulate common constants that are used with the various modulestores.
"""
class Type:
"""
The various types of modulestores provided
"""
split = 'split'
mongo = 'mongo'
class RevisionOption:
"""
Revision constants to use for Module Store operations
Note: These values are passed into store APIs and only used at run time
"""
# both DRAFT and PUBLISHED versions are queried, with preference to DRAFT versions
draft_preferred = 'rev-opt-draft-preferred'
# only DRAFT versions are queried and no PUBLISHED versions
draft_only = 'rev-opt-draft-only'
# # only PUBLISHED versions are queried and no DRAFT versions
published_only = 'rev-opt-published-only'
# all revisions are queried
all = 'rev-opt-all'
class Branch:
"""
Branch constants to use for stores, such as Mongo, that have only 2 branches: DRAFT and PUBLISHED
Note: These values are taken from server configuration settings, so should not be changed without alerting DevOps # lint-amnesty, pylint: disable=line-too-long
"""
draft_preferred = 'draft-preferred'
published_only = 'published-only'
class BranchName:
"""
Branch constants to use for stores, such as Split, that have named branches
"""
draft = 'draft-branch'
published = 'published-branch'
library = 'library'
class UserID:
"""
Values for user ID defaults
"""
# Note: we use negative values here to (try to) not collide
# with user identifiers provided by actual user services.
# user ID to use for all management commands
mgmt_command = -1
# user ID to use for primitive commands
primitive_command = -2
# user ID to use for tests that do not have a django user available
test = -3
# user ID for automatic update by the system
system = -4
class SortOrder:
"""
Values for sorting asset metadata.
"""
ascending = 1
descending = 2
class BulkOpsRecord:
"""
For handling nesting of bulk operations
"""
def __init__(self):
self._active_count = 0
self.has_publish_item = False
self.has_library_updated_item = False
@property
def active(self):
"""
Return whether this bulk write is active.
"""
return self._active_count > 0
def nest(self):
"""
Record another level of nesting of this bulk write operation
"""
self._active_count += 1
def unnest(self):
"""
Record the completion of a level of nesting of the bulk write operation
"""
self._active_count -= 1
@property
def is_root(self):
"""
Return whether the bulk write is at the root (first) level of nesting
"""
return self._active_count == 1
class ActiveBulkThread(threading.local):
"""
Add the expected vars to the thread.
"""
def __init__(self, bulk_ops_record_type, **kwargs):
super().__init__(**kwargs)
self.records = defaultdict(bulk_ops_record_type)
class BulkOperationsMixin:
"""
This implements the :meth:`bulk_operations` modulestore semantics which handles nested invocations
In particular, it implements :meth:`_begin_bulk_operation` and
:meth:`_end_bulk_operation` to provide the external interface
Internally, this mixin records the set of all active bulk operations (keyed on the active course),
and only writes those values when :meth:`_end_bulk_operation` is called.
If a bulk write operation isn't active, then the changes are immediately written to the underlying
mongo_connection.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._active_bulk_ops = ActiveBulkThread(self._bulk_ops_record_type)
self.signal_handler = None
@contextmanager
def bulk_operations(self, course_id, emit_signals=True, ignore_case=False):
"""
A context manager for notifying the store of bulk operations. This affects only the current thread.
In the case of Mongo, it temporarily disables refreshing the metadata inheritance tree
until the bulk operation is completed.
"""
try:
self._begin_bulk_operation(course_id, ignore_case)
yield
finally:
self._end_bulk_operation(course_id, emit_signals, ignore_case)
# the relevant type of bulk_ops_record for the mixin (overriding classes should override
# this variable)
_bulk_ops_record_type = BulkOpsRecord
def _get_bulk_ops_record(self, course_key, ignore_case=False):
"""
Return the :class:`.BulkOpsRecord` for this course.
"""
if course_key is None:
return self._bulk_ops_record_type()
# Retrieve the bulk record based on matching org/course/run (possibly ignoring case)
if ignore_case:
for key, record in self._active_bulk_ops.records.items():
# Shortcut: check basic equivalence for cases where org/course/run might be None.
if (key == course_key) or ( # lint-amnesty, pylint: disable=too-many-boolean-expressions
(key.org and key.org.lower() == course_key.org.lower()) and
(key.course and key.course.lower() == course_key.course.lower()) and
(key.run and key.run.lower() == course_key.run.lower())
):
return record
return self._active_bulk_ops.records[course_key.for_branch(None)]
@property
def _active_records(self):
"""
Yield all active (CourseLocator, BulkOpsRecord) tuples.
"""
for course_key, record in self._active_bulk_ops.records.items():
if record.active:
yield (course_key, record)
def _clear_bulk_ops_record(self, course_key):
"""
Clear the record for this course
"""
if course_key.for_branch(None) in self._active_bulk_ops.records:
del self._active_bulk_ops.records[course_key.for_branch(None)]
def _start_outermost_bulk_operation(self, bulk_ops_record, course_key, ignore_case=False):
"""
The outermost nested bulk_operation call: do the actual begin of the bulk operation.
Implementing classes must override this method; otherwise, the bulk operations are a noop
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
def _begin_bulk_operation(self, course_key, ignore_case=False):
"""
Begin a bulk operation on course_key.
"""
bulk_ops_record = self._get_bulk_ops_record(course_key, ignore_case)
# Increment the number of active bulk operations (bulk operations
# on the same course can be nested)
bulk_ops_record.nest()
# If this is the highest level bulk operation, then initialize it
if bulk_ops_record.is_root:
self._start_outermost_bulk_operation(bulk_ops_record, course_key, ignore_case)
def _end_outermost_bulk_operation(self, bulk_ops_record, structure_key):
"""
The outermost nested bulk_operation call: do the actual end of the bulk operation.
Implementing classes must override this method; otherwise, the bulk operations are a noop
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
def _end_bulk_operation(self, structure_key, emit_signals=True, ignore_case=False):
"""
End the active bulk operation on structure_key (course or library key).
"""
# If no bulk op is active, return
bulk_ops_record = self._get_bulk_ops_record(structure_key, ignore_case)
if not bulk_ops_record.active:
return
# Send the pre-publish signal within the context of the bulk operation.
# Writes performed by signal handlers will be persisted when the bulk
# operation ends.
if emit_signals and bulk_ops_record.is_root:
self.send_pre_publish_signal(bulk_ops_record, structure_key)
bulk_ops_record.unnest()
# If this wasn't the outermost context, then don't close out the
# bulk operation.
if bulk_ops_record.active:
return
dirty = self._end_outermost_bulk_operation(bulk_ops_record, structure_key) # lint-amnesty, pylint: disable=assignment-from-no-return
# The bulk op has ended. However, the signal tasks below still need to use the
# built-up bulk op information (if the signals trigger tasks in the same thread).
# So re-nest until the signals are sent.
bulk_ops_record.nest()
if emit_signals and dirty:
self.send_bulk_published_signal(bulk_ops_record, structure_key)
self.send_bulk_library_updated_signal(bulk_ops_record, structure_key)
# Signals are sent. Now unnest and clear the bulk op for good.
bulk_ops_record.unnest()
self._clear_bulk_ops_record(structure_key)
def _is_in_bulk_operation(self, course_key, ignore_case=False):
"""
Return whether a bulk operation is active on `course_key`.
"""
return self._get_bulk_ops_record(course_key, ignore_case).active
def send_pre_publish_signal(self, bulk_ops_record, course_id):
"""
Send a signal just before items are published in the course.
"""
signal_handler = getattr(self, "signal_handler", None)
if signal_handler and bulk_ops_record.has_publish_item:
signal_handler.send("pre_publish", course_key=course_id)
def send_bulk_published_signal(self, bulk_ops_record, course_id):
"""
Sends out the signal that items have been published from within this course.
"""
if self.signal_handler and bulk_ops_record.has_publish_item:
# We remove the branch, because publishing always means copying from draft to published
self.signal_handler.send("course_published", course_key=course_id.for_branch(None))
bulk_ops_record.has_publish_item = False
def send_bulk_library_updated_signal(self, bulk_ops_record, library_id):
"""
        Sends out the signal that the library has been updated.
"""
if self.signal_handler and bulk_ops_record.has_library_updated_item:
self.signal_handler.send("library_updated", library_key=library_id)
bulk_ops_record.has_library_updated_item = False
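
# Illustrative sketch of the bulk-operation semantics implemented above (assumes
# `store` is a modulestore that mixes in BulkOperationsMixin and `course_key` is a
# CourseKey; `update_item` stands in for any write API -- nested contexts only nest
# the record, and writes/signals are flushed when the outermost context exits):
#
#     with store.bulk_operations(course_key):
#         store.update_item(block_a, user_id)          # buffered
#         with store.bulk_operations(course_key):      # nested: just increments depth
#             store.update_item(block_b, user_id)
#     # <- outermost exit: _end_outermost_bulk_operation runs, signals are sent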
class EditInfo: # pylint: disable=eq-without-hash
"""
Encapsulates the editing info of a block.
"""
def __init__(self, **kwargs):
self.from_storable(kwargs)
# For details, see caching_descriptor_system.py get_subtree_edited_by/on.
self._subtree_edited_on = kwargs.get('_subtree_edited_on', None)
self._subtree_edited_by = kwargs.get('_subtree_edited_by', None)
def to_storable(self):
"""
Serialize to a Mongo-storable format.
"""
return {
'previous_version': self.previous_version,
'update_version': self.update_version,
'source_version': self.source_version,
'edited_on': self.edited_on,
'edited_by': self.edited_by,
'original_usage': self.original_usage,
'original_usage_version': self.original_usage_version,
}
def from_storable(self, edit_info):
"""
De-serialize from Mongo-storable format to an object.
"""
# Guid for the structure which previously changed this XBlock.
# (Will be the previous value of 'update_version'.)
self.previous_version = edit_info.get('previous_version', None)
# Guid for the structure where this XBlock got its current field values.
# May point to a structure not in this structure's history (e.g., to a draft
# branch from which this version was published).
self.update_version = edit_info.get('update_version', None)
self.source_version = edit_info.get('source_version', None)
# Datetime when this XBlock's fields last changed.
self.edited_on = edit_info.get('edited_on', None)
# User ID which changed this XBlock last.
self.edited_by = edit_info.get('edited_by', None)
# If this block has been copied from a library using copy_from_template,
# these fields point to the original block in the library, for analytics.
self.original_usage = edit_info.get('original_usage', None)
self.original_usage_version = edit_info.get('original_usage_version', None)
def __repr__(self):
return ("{classname}(previous_version={self.previous_version}, "
"update_version={self.update_version}, "
"source_version={source_version}, "
"edited_on={self.edited_on}, "
"edited_by={self.edited_by}, "
"original_usage={self.original_usage}, "
"original_usage_version={self.original_usage_version}, "
"_subtree_edited_on={self._subtree_edited_on}, "
"_subtree_edited_by={self._subtree_edited_by})").format(
self=self,
classname=self.__class__.__name__,
source_version="UNSET" if self.source_version is None else self.source_version,
)
def __eq__(self, edit_info):
"""
Two EditInfo instances are equal iff their storable representations
are equal.
"""
return self.to_storable() == edit_info.to_storable()
def __ne__(self, edit_info):
"""
Two EditInfo instances are unequal iff their storable representations differ.
"""
return not self == edit_info
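# Hedged round-trip sketch (values are illustrative):
#   info = EditInfo(edited_by=42)
#   EditInfo(**info.to_storable()) == info   # True: equality compares the storable dicts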
class BlockData: # pylint: disable=eq-without-hash
"""
Wrap the block data in an object instead of using a straight Python dictionary.
Allows the storing of meta-information about a structure that doesn't persist along with
the structure itself.
"""
def __init__(self, **kwargs):
# Has the definition been loaded?
self.definition_loaded = False
self.from_storable(kwargs)
def to_storable(self):
"""
Serialize to a Mongo-storable format.
"""
return {
'fields': self.fields,
'block_type': self.block_type,
'definition': self.definition,
'defaults': self.defaults,
'asides': self.get_asides(),
'edit_info': self.edit_info.to_storable(),
}
def from_storable(self, block_data):
"""
De-serialize from Mongo-storable format to an object.
"""
# Contains the Scope.settings and 'children' field values.
# 'children' are stored as a list of (block_type, block_id) pairs.
self.fields = block_data.get('fields', {})
# XBlock type ID.
self.block_type = block_data.get('block_type', None)
# DB id of the record containing the content of this XBlock.
self.definition = block_data.get('definition', None)
# Scope.settings default values copied from a template block (used e.g. when
# blocks are copied from a library to a course)
self.defaults = block_data.get('defaults', {})
# Additional field data that stored in connected XBlockAsides
self.asides = block_data.get('asides', {})
# EditInfo object containing all versioning/editing data.
self.edit_info = EditInfo(**block_data.get('edit_info', {}))
def get_asides(self):
"""
For the situations if block_data has no asides attribute
(in case it was taken from memcache)
"""
if not hasattr(self, 'asides'):
self.asides = {} # pylint: disable=attribute-defined-outside-init
return self.asides
def __repr__(self):
return ("{classname}(fields={self.fields}, "
"block_type={self.block_type}, "
"definition={self.definition}, "
"definition_loaded={self.definition_loaded}, "
"defaults={self.defaults}, "
"asides={asides}, "
"edit_info={self.edit_info})").format(
self=self,
classname=self.__class__.__name__,
asides=self.get_asides()
)
def __eq__(self, block_data):
"""
Two BlockData objects are equal iff all their attributes are equal.
"""
attrs = ['fields', 'block_type', 'definition', 'defaults', 'asides', 'edit_info']
return all(getattr(self, attr, None) == getattr(block_data, attr, None) for attr in attrs)
def __ne__(self, block_data):
"""
Just define this as not self.__eq__(block_data)
"""
return not self == block_data
class IncorrectlySortedList(Exception):
"""
Thrown when calling find() on a SortedAssetList not sorted by filename.
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
class SortedAssetList(SortedKeyList): # lint-amnesty, pylint: disable=abstract-method
"""
List of assets that is sorted based on an asset attribute.
"""
def __init__(self, **kwargs):
self.filename_sort = False
key_func = kwargs.get('key', None)
if key_func is None:
kwargs['key'] = itemgetter('filename')
self.filename_sort = True
super().__init__(**kwargs)
def find(self, asset_id):
"""
Find the index of a particular asset in the list. This method is only functional for lists
sorted by filename. If the list is sorted on any other key, find() raises an
IncorrectlySortedList exception.
Returns: Index of asset, if found. None if not found.
"""
# Don't attempt to find an asset by filename in a list that's not sorted by filename.
if not self.filename_sort:
raise IncorrectlySortedList()
# See if this asset already exists by checking the external_filename.
# Studio doesn't currently support using multiple course assets with the same filename.
# So use the filename as the unique identifier.
idx = None
idx_left = self.bisect_left({'filename': asset_id.path})
idx_right = self.bisect_right({'filename': asset_id.path})
if idx_left != idx_right:
# Asset was found in the list.
idx = idx_left
return idx
def insert_or_update(self, asset_md):
"""
Insert asset metadata if asset is not present. Update asset metadata if asset is already present.
"""
metadata_to_insert = asset_md.to_storable()
asset_idx = self.find(asset_md.asset_id)
if asset_idx is not None:
# Delete existing metadata.
del self[asset_idx]
# Add new metadata sorted into the list.
self.add(metadata_to_insert)
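# Hedged usage sketch (plain dicts stand in for stored asset metadata):
#   assets = SortedAssetList(iterable=[{'filename': 'a.png'}, {'filename': 'c.png'}])
#   assets.add({'filename': 'b.png'})            # kept sorted by filename
#   assets.bisect_left({'filename': 'b.png'})    # -> 1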
class ModuleStoreAssetBase:
"""
The methods for accessing assets and their metadata
"""
def _find_course_asset(self, asset_key):
"""
Returns same as _find_course_assets plus the index to the given asset or None. Does not convert
to AssetMetadata; thus, is internal.
Arguments:
asset_key (AssetKey): what to look for
Returns:
Tuple of:
- AssetMetadata[] for all assets of the given asset_key's type
- the index of asset in list (None if asset does not exist)
"""
course_assets = self._find_course_assets(asset_key.course_key) # lint-amnesty, pylint: disable=no-member
all_assets = SortedAssetList(iterable=course_assets.setdefault(asset_key.block_type, []))
idx = all_assets.find(asset_key)
return course_assets, idx
def find_asset_metadata(self, asset_key, **kwargs):
"""
Find the metadata for a particular course asset.
Arguments:
asset_key (AssetKey): key containing original asset filename
Returns:
asset metadata (AssetMetadata) -or- None if not found
"""
course_assets, asset_idx = self._find_course_asset(asset_key)
if asset_idx is None:
return None
mdata = AssetMetadata(asset_key, asset_key.path, **kwargs)
all_assets = course_assets[asset_key.asset_type]
mdata.from_storable(all_assets[asset_idx])
return mdata
def get_all_asset_metadata(self, course_key, asset_type, start=0, maxresults=-1, sort=None, **kwargs): # lint-amnesty, pylint: disable=unused-argument
"""
Returns a list of asset metadata for all assets of the given asset_type in the course.
Args:
course_key (CourseKey): course identifier
asset_type (str): the block_type of the assets to return. If None, return assets of all types.
start (int): optional - start at this asset number. Zero-based!
maxresults (int): optional - return at most this many, -1 means no limit
sort (array): optional - None means no sort
(sort_by (str), sort_order (str))
sort_by - one of 'uploadDate' or 'displayname'
sort_order - one of SortOrder.ascending or SortOrder.descending
Returns:
List of AssetMetadata objects.
"""
course_assets = self._find_course_assets(course_key) # lint-amnesty, pylint: disable=no-member
# Determine the proper sort - with defaults of ('displayname', SortOrder.ascending).
key_func = None
sort_order = ModuleStoreEnum.SortOrder.ascending
if sort:
if sort[0] == 'uploadDate':
key_func = lambda x: x['edit_info']['edited_on']
if sort[1] == ModuleStoreEnum.SortOrder.descending:
sort_order = ModuleStoreEnum.SortOrder.descending
if asset_type is None:
# Add assets of all types to the sorted list.
all_assets = SortedAssetList(iterable=[], key=key_func)
for asset_type, val in course_assets.items(): # lint-amnesty, pylint: disable=redefined-argument-from-local
all_assets.update(val)
else:
# Add assets of a single type to the sorted list.
all_assets = SortedAssetList(iterable=course_assets.get(asset_type, []), key=key_func)
num_assets = len(all_assets)
start_idx = start
end_idx = min(num_assets, start + maxresults)
if maxresults < 0:
# No limit on the results.
end_idx = num_assets
step_incr = 1
if sort_order == ModuleStoreEnum.SortOrder.descending:
# Flip the indices and iterate backwards.
step_incr = -1
start_idx = (num_assets - 1) - start_idx
end_idx = (num_assets - 1) - end_idx
ret_assets = []
for idx in range(start_idx, end_idx, step_incr):
raw_asset = all_assets[idx]
asset_key = course_key.make_asset_key(raw_asset['asset_type'], raw_asset['filename'])
new_asset = AssetMetadata(asset_key)
new_asset.from_storable(raw_asset)
ret_assets.append(new_asset)
return ret_assets
# pylint: disable=unused-argument
def check_supports(self, course_key, method):
"""
Verifies that a modulestore supports a particular method.
Some modulestores may differ based on the course_key, such
as mixed (since it has to find the underlying modulestore),
so it's required as part of the method signature.
"""
return hasattr(self, method)
class ModuleStoreAssetWriteInterface(ModuleStoreAssetBase):
"""
The write operations for assets and asset metadata
"""
def _save_assets_by_type(self, course_key, asset_metadata_list, course_assets, user_id, import_only):
"""
Common private method that saves/updates asset metadata items in the internal modulestore
structure used to store asset metadata items.
"""
# Lazily create a sorted list if not already created.
assets_by_type = defaultdict(lambda: SortedAssetList(iterable=course_assets.get(asset_type, [])))
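# Note: the lambda closes over `asset_type` late; by the time a missing key is first
# accessed in the loop below, `asset_type` already holds the type being processed, so
# the default list is seeded from that type's existing course assets.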
for asset_md in asset_metadata_list:
if asset_md.asset_id.course_key != course_key:
# pylint: disable=logging-format-interpolation
log.warning("Asset's course {} does not match other assets for course {} - not saved.".format(
asset_md.asset_id.course_key, course_key
))
continue
if not import_only:
asset_md.update({'edited_by': user_id, 'edited_on': datetime.datetime.now(UTC)})
asset_type = asset_md.asset_id.asset_type
all_assets = assets_by_type[asset_type]
all_assets.insert_or_update(asset_md)
return assets_by_type
def save_asset_metadata(self, asset_metadata, user_id, import_only):
"""
Saves the asset metadata for a particular course's asset.
Arguments:
asset_metadata (AssetMetadata): data about the course asset data
user_id (int): user ID saving the asset metadata
import_only (bool): True if importing without editing, False if editing
Returns:
True if metadata save was successful, else False
"""
raise NotImplementedError()
def save_asset_metadata_list(self, asset_metadata_list, user_id, import_only):
"""
Saves a list of asset metadata for a particular course's asset.
Arguments:
asset_metadata (AssetMetadata): data about the course asset data
user_id (int): user ID saving the asset metadata
import_only (bool): True if importing without editing, False if editing
Returns:
True if metadata save was successful, else False
"""
raise NotImplementedError()
def set_asset_metadata_attrs(self, asset_key, attrs, user_id):
"""
Base method to over-ride in modulestore.
"""
raise NotImplementedError()
def delete_asset_metadata(self, asset_key, user_id):
"""
Base method to over-ride in modulestore.
"""
raise NotImplementedError()
def set_asset_metadata_attr(self, asset_key, attr, value, user_id):
"""
Add/set the given attr on the asset at the given location. Value can be any type which pymongo accepts.
Arguments:
asset_key (AssetKey): asset identifier
attr (str): which attribute to set
value: the value to set it to (any type pymongo accepts such as datetime, number, string)
user_id (int): user ID saving the asset metadata
Raises:
ItemNotFoundError if no such item exists
AttributeError if attr is one of the built-in attrs.
"""
return self.set_asset_metadata_attrs(asset_key, {attr: value}, user_id)
def copy_all_asset_metadata(self, source_course_key, dest_course_key, user_id):
"""
Copy all the course assets from source_course_key to dest_course_key.
NOTE: unlike get_all_asset_metadata, this does not take an asset type because
this function is intended for things like cloning or exporting courses not for
clients to list assets.
Arguments:
source_course_key (CourseKey): identifier of course to copy from
dest_course_key (CourseKey): identifier of course to copy to
user_id (int): user ID copying the asset metadata
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
class ModuleStoreRead(ModuleStoreAssetBase, metaclass=ABCMeta):
"""
An abstract interface for a database backend that stores XModuleDescriptor
instances and extends read-only functionality
"""
@abstractmethod
def has_item(self, usage_key):
"""
Returns True if usage_key exists in this ModuleStore.
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
@abstractmethod
def get_item(self, usage_key, depth=0, using_descriptor_system=None, **kwargs):
"""
Returns an XModuleDescriptor instance for the item at location.
If any segment of the location is None except revision, raises
xmodule.modulestore.exceptions.InsufficientSpecificationError
If no object is found at that location, raises
xmodule.modulestore.exceptions.ItemNotFoundError
usage_key: A :class:`.UsageKey` subclass instance
depth (int): An argument that some module stores may use to prefetch
descendents of the queried modules for more efficient results later
in the request. The depth is counted in the number of calls to
get_children() to cache. None indicates to cache all descendents
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
@abstractmethod
def get_course_errors(self, course_key):
"""
Return a list of (msg, exception-or-None) errors that the modulestore
encountered when loading the course at course_id.
Raises the same exceptions as get_item if the location isn't found or
isn't fully specified.
Args:
course_key (:class:`.CourseKey`): The course to check for errors
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
@abstractmethod
def get_items(self, course_id, qualifiers=None, **kwargs):
"""
Returns a list of XModuleDescriptor instances for the items
that match location. Any element of location that is None is treated
as a wildcard that matches any value
location: Something that can be passed to Location
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
def _block_matches(self, block, qualifiers):
"""
Return True or False depending on whether the field value (block contents)
matches the qualifiers as per get_items.
NOTE: Method only finds directly set value matches - not inherited nor default value matches.
For substring matching:
pass a regex object.
For arbitrary function comparison such as date time comparison:
pass the function as in start=lambda x: x < datetime.datetime(2014, 1, 1, 0, tzinfo=pytz.UTC)
Args:
block (dict, XBlock, or BlockData): either the BlockData (transformed from the db) -or-
a dict (from BlockData.fields or get_explicitly_set_fields_by_scope) -or-
the xblock.fields() value -or-
the XBlock from which to get the 'fields' value.
qualifiers (dict): {field: value} search pairs.
"""
if isinstance(block, XBlock):
# If an XBlock is passed-in, just match its fields.
xblock, fields = (block, block.fields)
elif isinstance(block, BlockData):
# BlockData is an object - compare its attributes in dict form.
xblock, fields = (None, block.__dict__)
else:
xblock, fields = (None, block)
def _is_set_on(key):
"""
Is this key set in fields? (return tuple of boolean and value). A helper which can
handle fields either being the json doc or xblock fields. Is inner function to restrict
use and to access local vars.
"""
if key not in fields:
return False, None
field = fields[key]
if xblock is not None:
return field.is_set_on(block), getattr(xblock, key)
else:
return True, field
for key, criteria in qualifiers.items():
is_set, value = _is_set_on(key)
if isinstance(criteria, dict) and '$exists' in criteria and criteria['$exists'] == is_set:
continue
if not is_set:
return False
if not self._value_matches(value, criteria):
return False
return True
def _value_matches(self, target, criteria):
"""
helper for _block_matches: does the target (field value) match the criteria?
If target is a list, do any of the list elements meet the criteria
If the criteria is a regex, does the target match it?
If the criteria is a function, does invoking it on the target yield something truthy?
If criteria is a dict {($nin|$in): []}, then do (none|any) of the list elements meet the criteria
Otherwise, is the target == criteria
"""
if isinstance(target, list):
return any(self._value_matches(ele, criteria) for ele in target)
elif isinstance(criteria, re.Pattern):
return criteria.search(target) is not None
elif callable(criteria):
return criteria(target)
elif isinstance(criteria, dict) and '$in' in criteria:
# note isn't handling any other things in the dict other than in
return any(self._value_matches(target, test_val) for test_val in criteria['$in'])
elif isinstance(criteria, dict) and '$nin' in criteria:
# note isn't handling any other things in the dict other than nin
return not any(self._value_matches(target, test_val) for test_val in criteria['$nin'])
else:
return criteria == target
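# Hedged examples of the criteria forms accepted above (`store` is an illustrative
# ModuleStoreRead instance; values are made up):
#   store._value_matches(['a', 'b'], 'a')                      # True  - list: any element
#   store._value_matches('chapter 1', re.compile('chapter'))   # True  - regex search
#   store._value_matches(5, lambda x: x > 3)                   # True  - callable criteria
#   store._value_matches('x', {'$in': ['x', 'y']})             # True  - membership
#   store._value_matches('x', {'$nin': ['x', 'y']})            # False - exclusion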
@abstractmethod
def make_course_key(self, org, course, run):
"""
Return a valid :class:`~opaque_keys.edx.keys.CourseKey` for this modulestore
that matches the supplied `org`, `course`, and `run`.
This key may represent a course that doesn't exist in this modulestore.
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
@abstractmethod
def make_course_usage_key(self, course_key):
"""
Return a valid :class:`~opaque_keys.edx.keys.UsageKey` for this modulestore
that matches the supplied course_key.
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
@abstractmethod
def get_courses(self, **kwargs):
'''
Returns a list containing the top level XModuleDescriptors of the courses
in this modulestore. This method can take an optional argument 'org' which
will efficiently apply a filter so that only the courses of the specified
ORG in the CourseKey will be fetched.
'''
pass # lint-amnesty, pylint: disable=unnecessary-pass
@abstractmethod
def get_course(self, course_id, depth=0, **kwargs):
'''
Look for a specific course by its id (:class:`CourseKey`).
Returns the course descriptor, or None if not found.
'''
pass # lint-amnesty, pylint: disable=unnecessary-pass
@abstractmethod
def has_course(self, course_id, ignore_case=False, **kwargs):
'''
Look for a specific course id. Returns whether it exists.
Args:
course_id (CourseKey):
ignore_case (boolean): some modulestores are case-insensitive. Use this flag
to search for whether a potentially conflicting course exists in that case.
'''
pass # lint-amnesty, pylint: disable=unnecessary-pass
@abstractmethod
def get_parent_location(self, location, **kwargs):
'''
Find the location that is the parent of this location in this
course. Needed for path_to_location().
'''
pass # lint-amnesty, pylint: disable=unnecessary-pass
@abstractmethod
def get_orphans(self, course_key, **kwargs):
"""
Get all of the xblocks in the given course which have no parents and are not of types which are
usually orphaned. NOTE: may include xblocks which still have references via xblocks which don't
use children to point to their dependents.
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
@abstractmethod
def get_errored_courses(self):
"""
Return a dictionary of course_dir -> [(msg, exception_str)], for each
course_dir where course loading failed.
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
@abstractmethod
def get_modulestore_type(self, course_id):
"""
Returns a type which identifies which modulestore is servicing the given
course_id. The return can be either "xml" (for XML based courses) or "mongo" for MongoDB backed courses
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
@abstractmethod
def get_courses_for_wiki(self, wiki_slug, **kwargs):
"""
Return the list of courses which use this wiki_slug
:param wiki_slug: the course wiki root slug
:return: list of course keys
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
@abstractmethod
def has_published_version(self, xblock):
"""
Returns true if this xblock exists in the published course regardless of whether it's up to date
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
@abstractmethod
def close_connections(self):
"""
Closes any open connections to the underlying databases
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
@contextmanager
def bulk_operations(self, course_id, emit_signals=True, ignore_case=False): # pylint: disable=unused-argument
"""
A context manager for notifying the store of bulk operations. This affects only the current thread.
"""
yield
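# Hedged usage sketch (names are illustrative, not defined in this module):
#   with modulestore.bulk_operations(course_key):
#       modulestore.update_item(block_a, user_id)
#       modulestore.update_item(block_b, user_id)
#   # signals such as 'course_published' fire once, when the outermost context exits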
def ensure_indexes(self):
"""
Ensure that all appropriate indexes are created that are needed by this modulestore, or raise
an exception if unable to.
This method is intended for use by tests and administrative commands, and not
to be run during server startup.
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
class ModuleStoreWrite(ModuleStoreRead, ModuleStoreAssetWriteInterface, metaclass=ABCMeta):
"""
An abstract interface for a database backend that stores XModuleDescriptor
instances and extends both read and write functionality
"""
@abstractmethod
def update_item(self, xblock, user_id, allow_not_found=False, force=False, **kwargs):
"""
Update the given xblock's persisted repr. Pass the user's unique id which the persistent store
should save with the update if it has that ability.
:param allow_not_found: whether this method should raise an exception if the given xblock
has not been persisted before.
:param force: fork the structure and don't update the course draftVersion if there's a version
conflict (only applicable to version tracking and conflict detecting persistence stores)
:raises VersionConflictError: if org, course, run, and version_guid given and the current
version head != version_guid and force is not True. (only applicable to version tracking stores)
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
@abstractmethod
def delete_item(self, location, user_id, **kwargs):
"""
Delete an item and its subtree from persistence. Remove the item from any parents (Note, does not
affect parents from other branches or logical branches; thus, in old mongo, deleting something
whose parent cannot be draft, deletes it from both but deleting a component under a draft vertical
only deletes it from the draft).
Pass the user's unique id which the persistent store
should save with the update if it has that ability.
:param force: fork the structure and don't update the course draftVersion if there's a version
conflict (only applicable to version tracking and conflict detecting persistence stores)
:raises VersionConflictError: if org, course, run, and version_guid given and the current
version head != version_guid and force is not True. (only applicable to version tracking stores)
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
@abstractmethod
def create_course(self, org, course, run, user_id, fields=None, **kwargs):
"""
Creates and returns the course.
Args:
org (str): the organization that owns the course
course (str): the name of the course
run (str): the name of the run
user_id: id of the user creating the course
fields (dict): Fields to set on the course at initialization
kwargs: Any optional arguments understood by a subset of modulestores to customize instantiation
Returns: a CourseBlock
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
@abstractmethod
def create_item(self, user_id, course_key, block_type, block_id=None, fields=None, **kwargs):
"""
Creates and saves a new item in a course.
Returns the newly created item.
Args:
user_id: ID of the user creating and saving the xmodule
course_key: A :class:`~opaque_keys.edx.CourseKey` identifying which course to create
this item in
block_type: The type of block to create
block_id: a unique identifier for the new item. If not supplied,
a new identifier will be generated
fields (dict): A dictionary specifying initial values for some or all fields
in the newly created block
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
@abstractmethod
def clone_course(self, source_course_id, dest_course_id, user_id, fields=None):
"""
Sets up dest_course_id to point to a course with the same content as source_course_id. This
operation may be cheap or expensive. It may have to copy all assets and all xblock content or
merely setup new pointers.
Backward compatibility: this method used to require in some modulestores that dest_course_id
pointed to an empty but already created course. Implementers should support this or should
enable creating the course from scratch.
Raises:
ItemNotFoundError: if the source course doesn't exist (or any of its xblocks aren't found)
DuplicateItemError: if the destination course already exists (with content in some cases)
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
@abstractmethod
def delete_course(self, course_key, user_id, **kwargs):
"""
Deletes the course. It may be a soft or hard delete. It may or may not remove the xblock definitions
depending on the persistence layer and how tightly bound the xblocks are to the course.
Args:
course_key (CourseKey): which course to delete
user_id: id of the user deleting the course
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
@abstractmethod
def _drop_database(self, database=True, collections=True, connections=True):
"""
A destructive operation to drop the underlying database and close all connections.
Intended to be used by test code for cleanup.
If database is True, then this should drop the entire database.
Otherwise, if collections is True, then this should drop all of the collections used
by this modulestore.
Otherwise, the modulestore should remove all data from the collections.
If connections is True, then close the connection to the database as well.
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
# pylint: disable=abstract-method
class ModuleStoreReadBase(BulkOperationsMixin, ModuleStoreRead):
"""
Implement interface functionality that can be shared.
"""
def __init__( # lint-amnesty, pylint: disable=unused-argument
self,
contentstore=None,
doc_store_config=None, # ignore if passed up
metadata_inheritance_cache_subsystem=None, request_cache=None,
xblock_mixins=(), xblock_select=None, xblock_field_data_wrappers=(), disabled_xblock_types=lambda: [],
# temporary parms to enable backward compatibility. remove once all envs migrated
db=None, collection=None, host=None, port=None, tz_aware=True, user=None, password=<PASSWORD>,
# allow lower level init args to pass harmlessly
** kwargs
):
'''
Set up the error-tracking logic.
'''
super().__init__(**kwargs)
self._course_errors = defaultdict(make_error_tracker) # location -> ErrorLog
# TODO move the inheritance_cache_subsystem to classes which use it
self.metadata_inheritance_cache_subsystem = metadata_inheritance_cache_subsystem
self.request_cache = request_cache
self.xblock_mixins = xblock_mixins
self.xblock_select = xblock_select
self.xblock_field_data_wrappers = xblock_field_data_wrappers
self.disabled_xblock_types = disabled_xblock_types
self.contentstore = contentstore
def get_course_errors(self, course_key):
"""
Return list of errors for this :class:`.CourseKey`, if any. Raise the same
errors as get_item if course_key isn't present.
"""
# check that item is present and raise the promised exceptions if needed
# TODO (vshnayder): post-launch, make errors properties of items
# self.get_item(location)
assert isinstance(course_key, CourseKey)
return self._course_errors[course_key].errors
def get_errored_courses(self):
"""
Returns an empty dict.
It is up to subclasses to extend this method if the concept
of errored courses makes sense for their implementation.
"""
return {}
def get_course(self, course_id, depth=0, **kwargs):
"""
See ModuleStoreRead.get_course
Default impl--linear search through course list
"""
assert isinstance(course_id, CourseKey)
for course in self.get_courses(**kwargs):
if course.id == course_id:
return course
return None
def has_course(self, course_id, ignore_case=False, **kwargs):
"""
Returns the course_id of the course if it was found, else None
Args:
course_id (CourseKey):
ignore_case (boolean): some modulestores are case-insensitive. Use this flag
to search for whether a potentially conflicting course exists in that case.
"""
# linear search through list
assert isinstance(course_id, CourseKey)
if ignore_case:
return next(
(
c.id for c in self.get_courses()
if c.id.org.lower() == course_id.org.lower() and
c.id.course.lower() == course_id.course.lower() and
c.id.run.lower() == course_id.run.lower()
),
None
)
else:
return next(
(c.id for c in self.get_courses() if c.id == course_id),
None
)
def has_published_version(self, xblock):
"""
Returns True since this is a read-only store.
"""
return True
def heartbeat(self):
"""
Is this modulestore ready?
"""
# default is to say yes by not raising an exception
return {'default_impl': True}
def close_connections(self):
"""
Closes any open connections to the underlying databases
"""
if self.contentstore:
self.contentstore.close_connections()
super().close_connections()
@contextmanager
def default_store(self, store_type):
"""
A context manager for temporarily changing the default store
"""
if self.get_modulestore_type(None) != store_type:
raise ValueError(f"Cannot set default store to type {store_type}")
yield
# pylint: disable=abstract-method
class ModuleStoreWriteBase(ModuleStoreReadBase, ModuleStoreWrite):
'''
Implement interface functionality that can be shared.
'''
def __init__(self, contentstore, **kwargs):
super().__init__(contentstore=contentstore, **kwargs)
self.mixologist = Mixologist(self.xblock_mixins)
def partition_fields_by_scope(self, category, fields):
"""
Return dictionary of {scope: {field1: val, ..}..} for the fields of this potential xblock
:param category: the xblock category
:param fields: the dictionary of {fieldname: value}
"""
result = defaultdict(dict)
if fields is None:
return result
classes = XBlock.load_class(category, select=prefer_xmodules)
cls = self.mixologist.mix(classes)
for field_name, value in fields.items():
field = getattr(cls, field_name)
result[field.scope][field_name] = value
return result
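# Hedged example of the returned shape (category and field names are illustrative; for a
# typical 'problem' block, display_name is Scope.settings and data is Scope.content):
#   partition_fields_by_scope('problem', {'display_name': 'Quiz', 'data': '<problem/>'})
#   # -> {Scope.settings: {'display_name': 'Quiz'}, Scope.content: {'data': '<problem/>'}}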
def create_course(self, org, course, run, user_id, fields=None, runtime=None, **kwargs): # lint-amnesty, pylint: disable=arguments-differ
"""
Creates any other pieces the course needs as a side effect and doesn't return
anything useful. The concrete subclass should call this before it returns the course.
"""
# clone a default 'about' overview module as well
about_location = self.make_course_key(org, course, run).make_usage_key('about', 'overview')
about_descriptor = XBlock.load_class('about')
overview_template = about_descriptor.get_template('overview.yaml')
self.create_item(
user_id,
about_location.course_key,
about_location.block_type,
block_id=about_location.block_id,
definition_data={'data': overview_template.get('data')},
metadata=overview_template.get('metadata'),
runtime=runtime,
continue_version=True,
)
def clone_course(self, source_course_id, dest_course_id, user_id, fields=None, **kwargs): # lint-amnesty, pylint: disable=unused-argument
"""
This base method just copies the assets. The lower level impls must do the actual cloning of
content.
"""
with self.bulk_operations(dest_course_id):
# copy the assets
if self.contentstore:
self.contentstore.copy_all_course_assets(source_course_id, dest_course_id)
return dest_course_id
def delete_course(self, course_key, user_id, **kwargs):
"""
This base method just deletes the assets. The lower level impls must do the actual deleting of
content.
"""
# delete the assets
if self.contentstore:
self.contentstore.delete_all_course_assets(course_key)
super().delete_course(course_key, user_id)
def _drop_database(self, database=True, collections=True, connections=True):
"""
A destructive operation to drop the underlying database and close all connections.
Intended to be used by test code for cleanup.
If database is True, then this should drop the entire database.
Otherwise, if collections is True, then this should drop all of the collections used
by this modulestore.
Otherwise, the modulestore should remove all data from the collections.
If connections is True, then close the connection to the database as well.
"""
if self.contentstore:
self.contentstore._drop_database(database, collections, connections) # pylint: disable=protected-access
super()._drop_database(database, collections, connections)
def create_child(self, user_id, parent_usage_key, block_type, block_id=None, fields=None, **kwargs):
"""
Creates and saves a new xblock as a child of the specified block
Returns the newly created item.
Args:
user_id: ID of the user creating and saving the xmodule
parent_usage_key: a :class:`~opaque_keys.edx.UsageKey` identifying the
block that this item should be parented under
block_type: The type of block to create
block_id: a unique identifier for the new item. If not supplied,
a new identifier will be generated
fields (dict): A dictionary specifying initial values for some or all fields
in the newly created block
"""
item = self.create_item(user_id, parent_usage_key.course_key, block_type, block_id=block_id, fields=fields, **kwargs) # lint-amnesty, pylint: disable=line-too-long
parent = self.get_item(parent_usage_key)
parent.children.append(item.location)
self.update_item(parent, user_id)
def _flag_library_updated_event(self, library_key):
"""
Wrapper around calls to fire the library_updated signal
Unless we're nested in an active bulk operation, this simply fires the signal;
otherwise a publish will be signalled at the end of the bulk operation
Arguments:
library_key - library_key to which the signal applies
"""
if self.signal_handler:
bulk_record = self._get_bulk_ops_record(library_key) if isinstance(self, BulkOperationsMixin) else None
if bulk_record and bulk_record.active:
bulk_record.has_library_updated_item = True
else:
self.signal_handler.send("library_updated", library_key=library_key)
def _emit_course_deleted_signal(self, course_key):
"""
Helper method used to emit the course_deleted signal.
"""
if self.signal_handler:
self.signal_handler.send("course_deleted", course_key=course_key)
def _emit_item_deleted_signal(self, usage_key, user_id):
"""
Helper method used to emit the item_deleted signal.
"""
if self.signal_handler:
self.signal_handler.send("item_deleted", usage_key=usage_key, user_id=user_id)
def only_xmodules(identifier, entry_points):
"""Only use entry_points that are supplied by the xmodule package"""
from_xmodule = [entry_point for entry_point in entry_points if entry_point.dist.key == 'xmodule']
return default_select(identifier, from_xmodule)
def prefer_xmodules(identifier, entry_points):
"""Prefer entry_points from the xmodule package"""
from_xmodule = [entry_point for entry_point in entry_points if entry_point.dist.key == 'xmodule']
if from_xmodule:
return default_select(identifier, from_xmodule)
else:
return default_select(identifier, entry_points)
| 2.375
| 2
|
frame.py
|
lkesteloot/clock
| 21
|
12777809
|
# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import math
from vector import Vector
from config import DPI, TAU, TIGHT_LARGE_BOLT_RADIUS, WALL_ANCHOR_RADIUS, WALL_ANCHOR_OFFSET
import draw
PADDING = DPI*1
CORNER_RADIUS = DPI*0.25
CORNER_POINTS = 32
FOOT_WIDTH = PADDING - 2*CORNER_RADIUS
ADD_FEET = False
ADD_WALL_ANCHOR_HOLES = False
def generate(data, color):
# Deduce size and position of frame, and its holes, from the existing data.
minX = DPI*100
minY = DPI*100
maxX = -DPI*100
maxY = -DPI*100
floorY = -DPI*100
holes = []
for piece in data["pieces"]:
cx = piece["cx"]
cy = piece["cy"]
minX = min(minX, cx)
minY = min(minY, cy)
maxX = max(maxX, cx)
maxY = max(maxY, cy)
for v in piece["points"]:
floorY = max(floorY, cy + v.y)
for hole in holes:
if hole["cx"] == cx and hole["cy"] == cy:
break
else:
holes.append({
"cx": cx,
"cy": cy,
"r": TIGHT_LARGE_BOLT_RADIUS,
})
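# Note: the for/else above appends a new hole only when the loop finishes without
# 'break', i.e. when no existing hole already sits at (cx, cy).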
# Add hole in lower-left since there are no axles there.
holes.append({
"cx": minX,
"cy": maxY,
"r": TIGHT_LARGE_BOLT_RADIUS,
})
sys.stderr.write("The frame has %d holes.\n" % len(holes))
# Expand margin.
minX -= PADDING
minY -= PADDING
maxX += PADDING
maxY += PADDING
floorY += PADDING
# Draw frame.
P = []
P.append(Vector(minX, minY))
P.append(Vector(maxX, minY))
if ADD_FEET:
P.append(Vector(maxX, floorY))
P.append(Vector(maxX - FOOT_WIDTH, floorY))
P.append(Vector(maxX - FOOT_WIDTH, maxY))
P.append(Vector(minX + FOOT_WIDTH, maxY))
P.append(Vector(minX + FOOT_WIDTH, floorY))
P.append(Vector(minX, floorY))
else:
P.append(Vector(maxX, maxY))
P.append(Vector(minX, maxY))
# Do not close this, the round_corners() function does it.
P = draw.round_corners(P, CORNER_RADIUS, CORNER_POINTS)
width = (maxX - minX)/DPI
height = ((floorY if ADD_FEET else maxY) - minY)/DPI
sys.stderr.write("Frame is %.1fx%.1f inches\n" % (width, height))
if width > 24 or height > 18:
sys.stderr.write("------------------ FRAME TOO LARGE -----------------------\n")
# Front piece.
piece = {
"cx": 0,
"cy": 0,
"cz": 9,
"type": "frame",
"color": color,
"speed": 0,
"points": P,
"holes": holes,
}
data["pieces"].append(piece)
# Back piece.
piece = piece.copy()
# Add holes for hanging frame to wall.
holes = holes[:]
if ADD_WALL_ANCHOR_HOLES:
# XXX Can probably delete this.
piece["holes"] = holes
holes.append({
"cx": minX + WALL_ANCHOR_OFFSET + PADDING,
"cy": minY + PADDING,
"r": WALL_ANCHOR_RADIUS,
})
holes.append({
"cx": maxX - WALL_ANCHOR_OFFSET - PADDING,
"cy": minY + PADDING,
"r": WALL_ANCHOR_RADIUS,
})
holes.append({
"cx": minX + WALL_ANCHOR_OFFSET + PADDING,
"cy": maxY - PADDING,
"r": WALL_ANCHOR_RADIUS,
})
holes.append({
"cx": maxX - WALL_ANCHOR_OFFSET - PADDING,
"cy": maxY - PADDING,
"r": WALL_ANCHOR_RADIUS,
})
piece["cz"] = -3
data["pieces"].append(piece)
| 2.5
| 2
|
is_isogram.py
|
brennanbrown/code-challenges
| 1
|
12777810
|
<gh_stars>1-10
# Function that determines whether a string that contains only letters is an isogram.
# Assume the empty string is an isogram. Ignore letter case.
def is_isogram(string):
string = string.lower()
for i in range(len(string)):
for j in range(i + 1, len(string)):
if(string[i] == string[j]):
return False
return True
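# A hedged, equivalent alternative using a set (not part of the original solution):
#   def is_isogram(string):
#       s = string.lower()
#       return len(set(s)) == len(s)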
Test.assert_equals(is_isogram("Dermatoglyphics"), True )
Test.assert_equals(is_isogram("isogram"), True )
Test.assert_equals(is_isogram("aba"), False, "same chars may not be adjacent" )
Test.assert_equals(is_isogram("moOse"), False, "same chars may not be same case" )
Test.assert_equals(is_isogram("isIsogram"), False )
Test.assert_equals(is_isogram(""), True, "an empty string is a valid isogram" )
| 4.0625
| 4
|
src/main.py
|
hyd2016/temporal-prediction
| 1
|
12777811
|
<gh_stars>1-10
# coding=utf-8
"""
Semi-supervised link prediction using a temporal metric.
"""
import argparse
import networkx as nx
import pandas as pd
from networkx import Graph
from typing import List
import tmlp
def parse_args():
'''
Parses the node2vec arguments.
'''
parser = argparse.ArgumentParser(description="Run node2vec.")
parser.add_argument('--input', nargs='?', default='karate.edgelist',
help='Input graph path')
parser.add_argument('--output', nargs='?', default='karate.emb',
help='Embeddings path')
parser.add_argument('--dimensions', type=int, default=128,
help='Number of dimensions. Default is 128.')
parser.add_argument('--weighted', dest='weighted', action='store_true',
help='Boolean specifying (un)weighted. Default is unweighted.')
parser.add_argument('--unweighted', dest='unweighted', action='store_false')
parser.set_defaults(weighted=False)
parser.add_argument('--directed', dest='directed', action='store_true',
help='Graph is (un)directed. Default is undirected.')
parser.add_argument('--undirected', dest='undirected', action='store_false')
parser.set_defaults(directed=False)
return parser.parse_args()
def read_graph(slice_count):
"""
Reads the input network in networkx.
"""
col_index = ["x", "y", "time"]
result = pd.read_table('email-Eu-core-temporal-Dept1.txt', sep=' ', header=None, names=col_index)
max_time = result['time'].max()
slice_num = max_time / slice_count + 1
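# Note: in Python 3 this is float division; the time-window comparisons below still
# work, but an integer slice width would be `max_time // slice_count + 1`.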
G_test = nx.Graph()
test_data = result[(result['time'] >= (slice_count - 1) * slice_num) & (
result['time'] < slice_count * slice_num)].iloc[:, 0:2]
# Test set
edge_tuples = [tuple(xi) for xi in test_data.values]
G_test.add_nodes_from(result['x'].tolist())
G_test.add_nodes_from(result['y'].tolist())
G_test.add_edges_from(edge_tuples)
# Training set
edge_tuples = [tuple(xi) for xi in result.values]
G = nx.Graph()
G.add_nodes_from(result['x'].tolist())
G.add_nodes_from(result['y'].tolist())
G.add_weighted_edges_from(edge_tuples)
for edge in G.edges():
if G[edge[0]][edge[1]]['weight'] > 1:
G[edge[0]][edge[1]]['weight'] = 1
return result, G, G_test
def main(args):
"""
Pipeline for representational learning for all nodes in a graph.
"""
resent_time = 3456000
train_data, nx_G, G_test = read_graph(20)
G = tmlp.Graph(nx_G, train_data, G_test, args.directed, resent_time)
G.predict()
if __name__ == "__main__":
args = parse_args()
main(args)
| 2.765625
| 3
|
saleor/lib/python3.7/site-packages/tests/post_processor/post_processor_tests.py
|
cxsper/saleor
| 2
|
12777812
|
from .models import VersatileImagePostProcessorTestModel
from ..tests import VersatileImageFieldBaseTestCase
class VersatileImageFieldPostProcessorTestCase(VersatileImageFieldBaseTestCase):
@classmethod
def setUpTestData(cls):
cls.instance = VersatileImagePostProcessorTestModel.objects.create(
image='python-logo.jpg'
)
def test_post_processor(self):
"""
Ensure the configured post processor is applied when sized images are
created on demand (the generated filename is hashed).
"""
self.instance.create_on_demand = True
self.assertEqual(
self.instance.image.crop['100x100'].url,
'/media/__sized__/python-logo-2c88a725748e22ee.jpg'
)
def test_obscured_file_delete(self):
self.assertImageDeleted(self.instance.image)
| 2.40625
| 2
|
src/deepsensemaking/__init__.py
|
deepsensemaking/sensemaking
| 0
|
12777813
|
#!/usr/bin/env python
# -*- coding: utf-8 -*
"""
sensemaking: Auxiliary Python Modules
"""
from sensemaking.testing import test
if __name__ == "__main__":
pass
| 1.078125
| 1
|
accounts/tests.py
|
ssa17021992/djrest
| 0
|
12777814
|
import json
from django.test import TestCase, Client
from django.urls import reverse
from django.utils import timezone
from rest_framework import status
from .models import User, SignUpCode
from .auth import auth_token, passwd_token
class UserTestCase(TestCase):
"""User test case"""
def setUp(self):
self.user = User(
firstName='User',
middleName='User',
lastName='User',
username='user',
password='<PASSWORD>',
birthday='1986-05-12',
phone='55 4351 8691'
)
self.user.set_password('<PASSWORD>')
self.user.save()
self.signup_code = SignUpCode.objects.create(email='<EMAIL>')
self.client = Client()
def tearDown(self):
self.user.delete()
self.signup_code.delete()
def test_password_hash(self):
self.assertTrue(self.user.password.startswith('<PASSWORD>'))
self.assertEqual(len(self.user.password), 78)
def test_signup_available(self):
path = reverse('signup-available')
response = self.client.post(path, json.dumps({
'username': 'user2'
}), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_signup_mail(self):
path = reverse('signup-mail')
response = self.client.post(path, json.dumps({
'email': '<EMAIL>'
}), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_signup_check(self):
path = reverse('signup-check')
response = self.client.post(path, json.dumps({
'email': '<EMAIL>',
'code': self.signup_code.code
}), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_signup(self):
path = reverse('accounts-signup')
response = self.client.post(path, json.dumps({
'firstName': 'User 2',
'middleName': 'User 2',
'lastName': 'User 2',
'username': 'user2',
'password': '<PASSWORD>',
'email': '<EMAIL>',
'birthday': '1987-06-20',
'phone': '55 4530 2942',
'code': self.signup_code.code
}), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_signin(self):
path = reverse('accounts-signin')
response = self.client.post(path, json.dumps({
'username': 'user', 'password': '<PASSWORD>'
}), content_type='application/json')
data = response.json()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue('token' in data.keys())
def test_passwd_change(self):
path = reverse('passwd-change')
token = auth_token(self.user)
response = self.client.post(path, json.dumps({
'current': 'p455w0rd', 'password': '<PASSWORD>'
}), content_type='application/json',
HTTP_AUTHORIZATION='Token %s' % token
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_passwd_mail(self):
path = reverse('passwd-mail')
response = self.client.post(path, json.dumps({
'username': 'user'
}), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_passwd_check(self):
path = reverse('passwd-check')
token = passwd_token(self.user)
response = self.client.post(path,
content_type='application/json',
HTTP_AUTHORIZATION='Token %s' % token
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_passwd_reset(self):
path = reverse('passwd-reset')
token = passwd_token(self.user)
response = self.client.post(path, json.dumps({
'password': '<PASSWORD>'
}), content_type='application/json',
HTTP_AUTHORIZATION='Token %s' % token
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_me_profile_update(self):
path = reverse('me-profile')
token = auth_token(self.user)
response = self.client.patch(path, json.dumps({
'firstName': 'User',
'middleName': 'User',
'lastName': 'User',
'birthday': '1994-08-14'
}), content_type='application/json',
HTTP_AUTHORIZATION='Token %s' % token
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_user_list(self):
path = reverse('user-list')
token = auth_token(self.user)
response = self.client.get(
path,
content_type='application/json',
HTTP_AUTHORIZATION='Token %s' % token
)
data = response.json()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue('results' in data.keys())
self.assertTrue(isinstance(data['results'], list))
| 2.578125
| 3
|
gamefixes/1664350.py
|
manueliglesiasgarcia/protonfixes
| 0
|
12777815
|
""" Ship Graveyard Simulator Prologue
"""
#pylint: disable=C0103
from protonfixes import util
def main():
""" needs builtin vulkan-1
"""
util.set_environment('WINEDLLOVERRIDES','vulkan-1=b')
| 1.351563
| 1
|
aic/views.py
|
abhi20161997/Apogee-2017
| 0
|
12777816
|
<filename>aic/views.py
from django.shortcuts import render
# Create your views here.
from django.shortcuts import get_object_or_404, render_to_response, redirect
from django.shortcuts import render
from django.template import Context
from django.http import HttpResponse, JsonResponse
import string, random, os
from apogee17.settings import *
from django.views.decorators.csrf import csrf_exempt
from django.utils.datastructures import MultiValueDictKeyError
from .models import *
from django.template.defaultfilters import slugify
def website(request) :
return render(request, "aic/index.html")
@csrf_exempt
def problemstatement_add(request) :
return JsonResponse({'status':0,'message':'Submission is closed now.'}, safe=False)
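# Note: the unconditional 'submission closed' response above makes the code below
# unreachable; it is presumably kept for when submissions reopen.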
data = request.POST
gl_name = data['gl_name']
gl_phone = data['gl_phone']
gl_email = data['gl_email']
gl_college = data['gl_college']
gl_yos = data['gl_yos']
member = {}
for x in range(1,4):
member[x] = {}
try:
key = 'mem-%s-name' % x
member[x]['name'] = data[key]
except KeyError:
member[x]['name'] = None
try:
key = 'mem-%s-email' % x
member[x]['email'] = data[key]
except KeyError:
member[x]['email'] = None
try:
key = 'mem-%s-phone' % x
member[x]['phone'] = data[key]
except KeyError:
member[x]['phone'] = None
try:
key = 'mem-%s-college' % x
member[x]['college'] = data[key]
except KeyError:
member[x]['college'] = None
try:
key = 'mem-%s-yos' % x
member[x]['yos'] = data[key]
except KeyError:
member[x]['yos'] = None
problem_statement = data['problem_statement']
solution = request.FILES['file']
print(solution)
try:
model_leader = Participant.objects.get(email=gl_email)
except:
model_leader = Participant.objects.create(name=gl_name, phone=gl_phone, email=gl_email, college=gl_college, yos=gl_yos)
model_member = {}
for x in range(1,4):
if member[x]['email'] != None:
try:
model_member[x] = Participant.objects.get(email=member[x]['email'])
except:
model_member[x] = Participant.objects.create(name=member[x]['name'], phone=member[x]['phone'], email=member[x]['email'], college=member[x]['college'], yos=member[x]['yos'])
else:
model_member[x] = None
solution.name = slugify(str(model_leader.id)+'-'+gl_college+'-'+gl_name)+'.zip'
model_project = AicSubmission.objects.create(leader=model_leader, solution=solution, problem_statement=problem_statement)
for x in range(1,4):
if model_member[x] != None:
model_project.members.add(model_member[x])
model_project.save()
response = {'status':1,'message':'Solution Submitted Successfully'}
return JsonResponse(response, safe=False)
def aic_excel(request):
from time import gmtime, strftime
import xlsxwriter
try:
import cStringIO as StringIO
except ImportError:
import StringIO
entries = AicSubmission.objects.filter().order_by('problem_statement', 'id')
output = StringIO.StringIO()
workbook = xlsxwriter.Workbook(output)
worksheet = workbook.add_worksheet('new-spreadsheet')
date_format = workbook.add_format({'num_format': 'mmmm d yyyy'})
worksheet.write(0, 0, "Generated:")
generated = strftime("%d-%m-%Y %H:%M:%S UTC", gmtime())
worksheet.write(0, 1, generated)
worksheet.write(1, 0, "Problem Statement")
worksheet.write(1, 1, "Leader's Name")
worksheet.write(1, 2, "Leader's Phone")
worksheet.write(1, 3, "Leader's Email")
worksheet.write(1, 4, "Leader's College")
worksheet.write(1, 5, "Leader's Year of Study")
worksheet.write(1, 6, "Member-1's Name")
worksheet.write(1, 7, "Member-1's Phone")
worksheet.write(1, 8, "Member-1's Email")
worksheet.write(1, 9, "Member-1's College")
worksheet.write(1, 10, "Member-1's Year of Study")
worksheet.write(1, 11, "Member-2's Name")
worksheet.write(1, 12, "Member-2's Phone")
worksheet.write(1, 13, "Member-2's Email")
worksheet.write(1, 14, "Member-2's College")
worksheet.write(1, 15, "Member-2's Year of Study")
worksheet.write(1, 16, "Member-3's Name")
worksheet.write(1, 17, "Member-3's Phone")
worksheet.write(1, 18, "Member-3's Email")
worksheet.write(1, 19, "Member-3's College")
worksheet.write(1, 20, "Member-3's Year of Study")
for i, sub in enumerate(entries):
"""for each object in the date list, attribute1 & attribute2
are written to the first & second column respectively,
for the relevant row. The 3rd arg is a failure message if
there is no data available"""
worksheet.write(i+2, 0, sub.problem_statement)
worksheet.write(i+2, 1, sub.leader.name)
worksheet.write(i+2, 2, sub.leader.phone)
worksheet.write(i+2, 3, sub.leader.email)
worksheet.write(i+2, 4, sub.leader.college)
worksheet.write(i+2, 5, sub.leader.yos)
try:
worksheet.write(i+2, 6, sub.members.all()[0].name)
worksheet.write(i+2, 7, sub.members.all()[0].phone)
worksheet.write(i+2, 8, sub.members.all()[0].email)
worksheet.write(i+2, 9, sub.members.all()[0].college)
worksheet.write(i+2, 10, sub.members.all()[0].yos)
worksheet.write(i+2, 11, sub.members.all()[1].name)
worksheet.write(i+2, 12, sub.members.all()[1].phone)
worksheet.write(i+2, 13, sub.members.all()[1].email)
worksheet.write(i+2, 14, sub.members.all()[1].college)
worksheet.write(i+2, 15, sub.members.all()[1].yos)
worksheet.write(i+2, 16, sub.members.all()[2].name)
worksheet.write(i+2, 17, sub.members.all()[2].phone)
worksheet.write(i+2, 18, sub.members.all()[2].email)
worksheet.write(i+2, 19, sub.members.all()[2].college)
worksheet.write(i+2, 20, sub.members.all()[2].yos)
except:
pass
workbook.close()
filename = 'ExcelReport.xlsx'
output.seek(0)
response = HttpResponse(output.read(), content_type="application/ms-excel")
response['Content-Disposition'] = 'attachment; filename=%s' % filename
return response
| 2
| 2
|
src/model/arch.py
|
guoyongcs/HNAS
| 60
|
12777817
|
<filename>src/model/arch.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from model.operations import *
from model.genotypes import COMPACT_PRIMITIVES, PRUNER_PRIMITIVES,COMPACT_PRIMITIVES_UPSAMPLING
from model.genotypes import Genotype
import model.utils as utils
import numpy as np
from model.utils import arch_to_genotype, draw_genotype, infinite_get, arch_to_string
import os
from model import common
import model.genotypes as genotypes
from model.utils import drop_path
import sys
sys.path.append("..")
from option import args
def make_model(args, parent=False):
return ArchNetwork(args)
class Cell(nn.Module):
def __init__(self, genotype, C_prev_prev, C_prev, C, upsample, upsample_prev):
super(Cell, self).__init__()
print(C_prev_prev, C_prev, C)
self.upsample = upsample
self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0)
self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0)
if upsample:
op_names, f_nodes, t_nodes = zip(*genotype.upsampling)
concat = genotype.upsampling_concat
else:
op_names, f_nodes, t_nodes = zip(*genotype.normal)
concat = genotype.normal_concat
self._compile(C, op_names, f_nodes, t_nodes, concat, upsample)
def _compile(self, C, op_names, f_nodes, t_nodes, concat, upsample):
assert len(op_names) == len(f_nodes) == len(t_nodes)
self._concat = concat
self.multiplier = len(concat)
self._ops = nn.ModuleList()
for name, f in zip(op_names, f_nodes):
scale = args.scale
stride = scale[0] if upsample and f < 2 else 1
op = OPS[name](C, stride, True)
self._ops += [op]
self._f_nodes = f_nodes
self._t_nodes = t_nodes
def forward(self, s0, s1, drop_prob):
s0 = self.preprocess0(s0)
s1 = self.preprocess1(s1)
states = {0: s0, 1: s1}
for op, f, t in zip(self._ops, self._f_nodes, self._t_nodes):
s = op(states[f])
if self.training and drop_prob > 0.:
if not isinstance(op, Identity):
s = drop_path(s, drop_prob)
if t in states:
states[t] = states[t] + s
else:
states[t] = s
return torch.cat([states[i] for i in self._concat], dim=1)
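# Note: `states` maps node index -> tensor; contributions to the same target node are
# summed, and the cell output concatenates the nodes listed in `self._concat` along
# the channel dimension.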
class ArchNetwork(nn.Module):
def __init__(self,args,conv=common.default_conv):
super(ArchNetwork, self).__init__()
self._layers = args.layers
layers=args.layers
genotype=eval("genotypes.%s" % args.genotype)
stem_multiplier = 4
C=args.init_channels
self._upsampling_Pos = args.upsampling_Pos
self.sub_mean = common.MeanShift(args.rgb_range)
self.add_mean = common.MeanShift(args.rgb_range, sign=1)
C_curr = stem_multiplier * C
self.stem = nn.Sequential(
nn.Conv2d(3, C_curr, 3, padding=1, bias=False),
)
C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
self.cells = nn.ModuleList()
upsample_prev = False
for i in range(layers):
if i in [self._upsampling_Pos]:
upsample = True
else:
upsample = False
cell = Cell(genotype, C_prev_prev, C_prev, C_curr, upsample, upsample_prev)
upsample_prev = upsample
self.cells += [cell]
C_prev_prev, C_prev = C_prev, cell.multiplier * C_curr
self.final_conv= conv(C_prev, args.n_colors, 3)
def model_parameters(self):
for k, v in self.named_parameters():
if 'arch' not in k:
yield v
def forward(self, input, drop_path_prob=0):
self.drop_path_prob = drop_path_prob
try:
input = self.sub_mean(input)
except:
input = input.cuda()
input = self.sub_mean(input)
s0 = s1 = self.stem(input)
for i, cell in enumerate(self.cells):
if cell.upsample == True:
s0, s1 = cell(s0, s1, self.drop_path_prob), cell(s0, s1, self.drop_path_prob)
else:
s0, s1 = s1, cell(s0, s1, self.drop_path_prob)
logits = self.final_conv(s1)
logits = self.add_mean(logits)
return logits
def save_arch_to_pdf(self,suffix):
genotype = arch_to_genotype(self.cur_normal_arch, self.cur_normal_arch, self._steps, "COMPACT")
return genotype
| 2.21875
| 2
|
SIGNUS/app/models/mongodb/realtime.py
|
837477/SIGNUS
| 0
|
12777818
|
'''
MongoDB realtime Collection Model
'''
from flask import current_app
from datetime import timedelta, datetime
class Realtime:
"""SIGNUS DB realtime Model"""
def __init__(self, client):
self.col = client[current_app.config['MONGODB_DB_NAME']]['realtime']
def find_latest(self):
''' Return the latest real-time search-term document '''
return list(self.col.find({},{'_id': 0}).sort([('date', -1)]).limit(1))[0]
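# Hedged usage sketch (requires an active Flask app context; names are illustrative):
#   rt = Realtime(mongo_client)
#   latest = rt.find_latest()   # newest document by 'date', '_id' excluded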
| 2.765625
| 3
|
alphamind/tests/portfolio/test_optimizers.py
|
rongliang-tech/alpha-mind
| 186
|
12777819
|
<gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
Created on 2017-11-1
@author: cheng.li
"""
import unittest
import numpy as np
from alphamind.portfolio.optimizers import LPOptimizer
from alphamind.portfolio.optimizers import QuadraticOptimizer
from alphamind.portfolio.optimizers import TargetVolOptimizer
class TestOptimizers(unittest.TestCase):
def test_lpoptimizer(self):
er = np.array([-1., -2.])
lower_bound = np.array([0., 0.2])
upper_bound = np.array([1., 0.8])
optimizer = LPOptimizer(objective=-er,
cons_matrix=np.array([[1., 1., 1., 1.]]),
lbound=lower_bound,
ubound=upper_bound)
self.assertAlmostEqual(optimizer.feval(), 1.2)
np.testing.assert_array_almost_equal(optimizer.x_value(), [0.8, 0.2])
def test_qpoptimizer(self):
er = np.array([0.01, 0.02, 0.03])
cov = np.array([[0.02, 0.01, 0.02],
[0.01, 0.02, 0.03],
[0.02, 0.03, 0.02]])
ids_var = np.diag([0.01, 0.02, 0.03])
cov += ids_var
lbound = np.array([0., 0., 0.])
ubound = np.array([0.4, 0.4, 0.5])
cons = np.array([[1., 1., 1.],
[1., 0., 1.]])
clbound = np.array([1., 0.3])
cubound = np.array([1., 0.7])
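        # each row of cons_matrix is [constraint coefficients..., lower bound, upper bound]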
cons_matrix = np.concatenate([cons, clbound.reshape((-1, 1)), cubound.reshape((-1, 1))], axis=1)
optimizer = QuadraticOptimizer(objective=-er,
cov=cov,
lbound=lbound,
ubound=ubound,
cons_matrix=cons_matrix)
# check against matlab result
np.testing.assert_array_almost_equal(optimizer.x_value(),
[0.2, 0.3, 0.5],
4)
def test_qpoptimizer_with_factor_model(self):
er = np.array([0.1, 0.2, 0.3])
lbound = np.array([0.0, 0.0, 0.0])
ubound = np.array([1.0, 1.0, 1.0])
factor_var = np.array([[0.5, -0.3], [-0.3, 0.7]])
factor_load = np.array([[0.8, 0.2], [0.5, 0.5], [0.2, 0.8]])
idsync = np.array([0.1, 0.3, 0.2])
cons = np.array([[1., 1., 1.]])
clbound = np.array([1.])
cubound = np.array([1.])
cons_matrix = np.concatenate([cons, clbound.reshape((-1, 1)), cubound.reshape((-1, 1))], axis=1)
optimizer = QuadraticOptimizer(objective=-er,
lbound=lbound,
ubound=ubound,
factor_cov=factor_var,
factor_load=factor_load,
factor_special=idsync,
cons_matrix=cons_matrix)
# check against cvxpy result
np.testing.assert_array_almost_equal(optimizer.x_value(),
[0.2866857, 0.21416417, 0.49915014],
4)
def test_qpoptimizer_with_identity_matrix(self):
er = np.array([-0.02, 0.01, 0.03])
cov = np.diag([1., 1., 1.])
optimizer = QuadraticOptimizer(objective=-er,
cov=cov)
np.testing.assert_array_almost_equal(optimizer.x_value(),
[-0.02, 0.01, 0.03],
4)
def test_target_vol_optimizer_without_cons(self):
er = np.array([0.1, 0.2, 0.3])
cov = np.array([[0.05, 0.01, 0.02],
[0.01, 0.06, 0.03],
[0.02, 0.03, 0.07]])
lbound = np.array([-0.3, -0.3, -0.3])
ubound = np.array([0.5, 0.5, 0.5])
target_vol = 0.1
optimizer = TargetVolOptimizer(objective=-er,
cov=cov,
lbound=lbound,
ubound=ubound,
target_vol=target_vol)
# check against known good result
np.testing.assert_array_almost_equal(optimizer.x_value(),
[.0231776, 0.1274768, 0.30130881],
4)
def test_target_vol_optimizer_with_cons(self):
er = np.array([0.1, 0.2, 0.3])
cov = np.array([[0.05, 0.01, 0.02],
[0.01, 0.06, 0.03],
[0.02, 0.03, 0.07]])
lbound = np.array([-0.3, -0.3, -0.3])
ubound = np.array([0.5, 0.5, 0.5])
cons = np.array([[1., 1., 1.]])
clbound = np.array([0.])
cubound = np.array([0.])
cons_matrix = np.concatenate([cons, clbound.reshape((-1, 1)), cubound.reshape((-1, 1))], axis=1)
target_vol = 0.1
optimizer = TargetVolOptimizer(objective=-er,
cov=cov,
lbound=lbound,
ubound=ubound,
target_vol=target_vol,
cons_matrix=cons_matrix)
# check against known good result
np.testing.assert_array_almost_equal(optimizer.x_value(),
[-0.3, -0.10919033, 0.40919033],
4)
def test_target_vol_optimizer_with_factor_model(self):
er = np.array([0.1, 0.2, 0.3])
lbound = np.array([0.0, 0.0, 0.0])
ubound = np.array([1.0, 1.0, 1.0])
factor_var = np.array([[0.5, -0.3], [-0.3, 0.7]])
factor_load = np.array([[0.8, 0.2], [0.5, 0.5], [0.2, 0.8]])
idsync = np.array([0.1, 0.3, 0.2])
cons = np.array([[1., 1., 1.]])
clbound = np.array([1.])
cubound = np.array([1.])
target_vol = 0.5
cons_matrix = np.concatenate([cons, clbound.reshape((-1, 1)), cubound.reshape((-1, 1))], axis=1)
optimizer = TargetVolOptimizer(objective=-er,
factor_cov=factor_var,
factor_load=factor_load,
factor_special=idsync,
lbound=lbound,
ubound=ubound,
target_vol=target_vol,
cons_matrix=cons_matrix)
# check against cvxpy result
np.testing.assert_array_almost_equal(optimizer.x_value(),
[0.26595552, 0.21675092, 0.51729356],
4)
def test_target_vol_with_cons_and_ieq(self):
er = np.array([0.1, 0.2, 0.3])
cov = np.array([[0.05, 0.01, 0.02],
[0.01, 0.06, 0.03],
[0.02, 0.03, 0.07]])
lbound = np.array([-0.3, -0.3, -0.3])
ubound = np.array([0.5, 0.5, 0.5])
cons = np.array([[1., 1., 1.]])
clbound = np.array([0.])
cubound = np.array([0.])
target_vol = 0.1
cons_matrix = np.concatenate([cons, clbound.reshape((-1, 1)), cubound.reshape((-1, 1))], axis=1)
optimizer = TargetVolOptimizer(objective=-er,
cov=cov,
lbound=lbound,
ubound=ubound,
target_vol=target_vol,
cons_matrix=cons_matrix)
# check against known good result
np.testing.assert_array_almost_equal(optimizer.x_value(),
[-0.3, -0.10919033, 0.40919033],
4)
if __name__ == '__main__':
unittest.main()
| 2.21875
| 2
|
src/python-azure-ad-token-validate/demo.py
|
ivangeorgiev/gems
| 10
|
12777820
|
<gh_stars>1-10
import os
import sys
import jwt
from aadtoken import get_public_key, get_jwks
client_id = os.environ.get('CLIENT_ID', '<your-webapp-id-goes-here>')
tenant_id = os.environ.get('TENANT_ID', '<your-tenant-id-goes-here>')
if len(sys.argv) > 1:
token = sys.argv[1]
else:
token = os.environ.get('TOKEN', "<your-token-goes-here>")
issuer = 'https://sts.windows.net/{tenant_id}/'.format(tenant_id=tenant_id)
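# the repeated lookups below exercise the cached JWKS fetch;
# get_jwks.cache_info() at the end reports the resulting hits/misses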
public_key = get_public_key(token)
public_key = get_public_key(token)
public_key = get_public_key(token)
decoded = jwt.decode(token,
public_key,
verify=True,
algorithms=['RS256'],
audience=[client_id],
issuer=issuer)
print(decoded)
print(get_jwks.cache_info())
| 2.34375
| 2
|
ws2122-lspm/Lib/site-packages/pm4py/algo/filtering/log/ltl/ltl_checker.py
|
Malekhy/ws2122-lspm
| 1
|
12777821
|
<filename>ws2122-lspm/Lib/site-packages/pm4py/algo/filtering/log/ltl/ltl_checker.py
'''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
from enum import Enum
from pm4py.objects.conversion.log import converter as log_converter
from pm4py.objects.log.obj import EventLog
from pm4py.util import exec_utils
from pm4py.util.constants import PARAMETER_CONSTANT_ATTRIBUTE_KEY, PARAMETER_CONSTANT_RESOURCE_KEY, \
PARAMETER_CONSTANT_TIMESTAMP_KEY
from pm4py.util.xes_constants import DEFAULT_NAME_KEY, DEFAULT_RESOURCE_KEY, DEFAULT_TIMESTAMP_KEY
import deprecation
from typing import Optional, Dict, Any, Union, Tuple, List
from pm4py.objects.log.obj import EventLog, EventStream, Trace
class Parameters(Enum):
ATTRIBUTE_KEY = PARAMETER_CONSTANT_ATTRIBUTE_KEY
TIMESTAMP_KEY = PARAMETER_CONSTANT_TIMESTAMP_KEY
RESOURCE_KEY = PARAMETER_CONSTANT_RESOURCE_KEY
POSITIVE = "positive"
ENABLE_TIMESTAMP = "enable_timestamp"
TIMESTAMP_DIFF_BOUNDARIES = "timestamp_diff_boundaries"
POSITIVE = Parameters.POSITIVE
ENABLE_TIMESTAMP = Parameters.ENABLE_TIMESTAMP
TIMESTAMP_DIFF_BOUNDARIES = Parameters.TIMESTAMP_DIFF_BOUNDARIES
def timestamp_list_is_ge(a, b):
for i in range(len(a)):
if a[i] < b[i][0]:
return False
return True
def timestamp_list_is_le(a, b):
for i in range(len(a)):
if a[i] > b[i][1]:
return False
return True
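# Example (illustrative): with boundaries [[60, 3600]] a single time difference of
# 120 seconds passes both checks, i.e. timestamp_list_is_ge([120], [[60, 3600]]) and
# timestamp_list_is_le([120], [[60, 3600]]) both return True.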
@deprecation.deprecated('2.2.6', '3.0.0', 'please use pm4py.algo.filtering.log.ltl.ltl_checker.eventually_follows')
def A_eventually_B(log, A, B, parameters=None):
"""
Applies the A eventually B rule
Parameters
------------
log
Log
A
A attribute value
B
B attribute value
parameters
Parameters of the algorithm, including the attribute key and the positive parameter:
- If True, returns all the cases containing A and B and in which A was eventually followed by B
- If False, returns all the cases not containing A or B, or in which an instance of A was not eventually
followed by an instance of B
Returns
------------
filtered_log
Filtered log
"""
if parameters is None:
parameters = {}
if not isinstance(log, EventLog):
log = log_converter.apply(log, variant=log_converter.TO_EVENT_LOG, parameters=parameters)
attribute_key = exec_utils.get_param_value(Parameters.ATTRIBUTE_KEY, parameters, DEFAULT_NAME_KEY)
timestamp_key = exec_utils.get_param_value(Parameters.TIMESTAMP_KEY, parameters, DEFAULT_TIMESTAMP_KEY)
positive = exec_utils.get_param_value(Parameters.POSITIVE, parameters, True)
enable_timestamp = exec_utils.get_param_value(Parameters.ENABLE_TIMESTAMP, parameters, False)
timestamp_diff_boundaries = exec_utils.get_param_value(Parameters.TIMESTAMP_DIFF_BOUNDARIES, parameters, [])
new_log = EventLog(list(), attributes=log.attributes, extensions=log.extensions, classifiers=log.classifiers,
omni_present=log.omni_present, properties=log.properties)
for trace in log:
if enable_timestamp:
occ_A = [trace[i][timestamp_key] for i in range(len(trace)) if
attribute_key in trace[i] and trace[i][attribute_key] == A]
occ_B = [trace[i][timestamp_key] for i in range(len(trace)) if
attribute_key in trace[i] and trace[i][attribute_key] == B]
diffs = [[(occ_B[j].timestamp() - occ_A[i].timestamp())] for i in range(len(occ_A)) for j in
range(len(occ_B)) if occ_B[j] > occ_A[i]]
else:
occ_A = [i for i in range(len(trace)) if attribute_key in trace[i] and trace[i][attribute_key] == A]
occ_B = [i for i in range(len(trace)) if attribute_key in trace[i] and trace[i][attribute_key] == B]
diffs = [[(occ_B[j] - occ_A[i])] for i in range(len(occ_A)) for j in range(len(occ_B)) if
occ_B[j] > occ_A[i]]
if enable_timestamp and timestamp_diff_boundaries:
diffs = [d for d in diffs if
timestamp_list_is_ge(d, timestamp_diff_boundaries) and timestamp_list_is_le(d,
timestamp_diff_boundaries)]
if diffs:
if positive:
new_log.append(trace)
elif not positive:
new_log.append(trace)
return new_log
@deprecation.deprecated('2.2.6', '3.0.0', 'please use pm4py.algo.filtering.log.ltl.ltl_checker.eventually_follows')
def A_eventually_B_eventually_C(log, A, B, C, parameters=None):
"""
Applies the A eventually B eventually C rule
Parameters
------------
log
Log
A
A attribute value
B
B attribute value
C
C attribute value
parameters
Parameters of the algorithm, including the attribute key and the positive parameter:
- If True, returns all the cases containing A, B and C and in which A was eventually followed by B and B was eventually followed by C
- If False, returns all the cases not containing A or B or C, or in which an instance of A was not eventually
followed by an instance of B or an instance of B was not eventually followed by C
Returns
------------
filtered_log
Filtered log
"""
if parameters is None:
parameters = {}
if not isinstance(log, EventLog):
log = log_converter.apply(log, variant=log_converter.TO_EVENT_LOG, parameters=parameters)
attribute_key = exec_utils.get_param_value(Parameters.ATTRIBUTE_KEY, parameters, DEFAULT_NAME_KEY)
timestamp_key = exec_utils.get_param_value(Parameters.TIMESTAMP_KEY, parameters, DEFAULT_TIMESTAMP_KEY)
positive = exec_utils.get_param_value(Parameters.POSITIVE, parameters, True)
enable_timestamp = exec_utils.get_param_value(Parameters.ENABLE_TIMESTAMP, parameters, False)
timestamp_diff_boundaries = exec_utils.get_param_value(Parameters.TIMESTAMP_DIFF_BOUNDARIES, parameters, [])
new_log = EventLog(list(), attributes=log.attributes, extensions=log.extensions, classifiers=log.classifiers,
omni_present=log.omni_present, properties=log.properties)
for trace in log:
if enable_timestamp:
occ_A = [trace[i][timestamp_key] for i in range(len(trace)) if
attribute_key in trace[i] and trace[i][attribute_key] == A]
occ_B = [trace[i][timestamp_key] for i in range(len(trace)) if
attribute_key in trace[i] and trace[i][attribute_key] == B]
occ_C = [trace[i][timestamp_key] for i in range(len(trace)) if
attribute_key in trace[i] and trace[i][attribute_key] == C]
diffs = [[occ_B[j].timestamp() - occ_A[i].timestamp(), occ_C[z].timestamp() - occ_B[j].timestamp()] for i in
range(len(occ_A)) for j in range(len(occ_B))
for z in range(len(occ_C)) if occ_B[j] > occ_A[i] and occ_C[z] > occ_B[j]]
else:
occ_A = [i for i in range(len(trace)) if attribute_key in trace[i] and trace[i][attribute_key] == A]
occ_B = [i for i in range(len(trace)) if attribute_key in trace[i] and trace[i][attribute_key] == B]
occ_C = [i for i in range(len(trace)) if attribute_key in trace[i] and trace[i][attribute_key] == C]
diffs = [[occ_B[j] - occ_A[i], occ_C[z] - occ_B[j]] for i in range(len(occ_A)) for j in range(len(occ_B))
for z in range(len(occ_C)) if occ_B[j] > occ_A[i] and occ_C[z] > occ_B[j]]
if enable_timestamp and timestamp_diff_boundaries:
diffs = [d for d in diffs if
timestamp_list_is_ge(d, timestamp_diff_boundaries) and timestamp_list_is_le(d,
timestamp_diff_boundaries)]
if diffs:
if positive:
new_log.append(trace)
elif not positive:
new_log.append(trace)
return new_log
@deprecation.deprecated('2.2.6', '3.0.0', 'please use pm4py.algo.filtering.log.ltl.ltl_checker.eventually_follows')
def A_eventually_B_eventually_C_eventually_D(log, A, B, C, D, parameters=None):
"""
    Applies the A eventually B eventually C eventually D rule
Parameters
------------
log
Log
A
A attribute value
B
B attribute value
C
C attribute value
D
D attribute value
parameters
Parameters of the algorithm, including the attribute key and the positive parameter:
            - If True, returns all the cases containing A, B, C and D and in which A was eventually followed by B, B was eventually followed by C and C was eventually followed by D
            - If False, returns all the cases not containing A, B, C or D, or in which an instance of A was not eventually
            followed by an instance of B, an instance of B was not eventually followed by C, or an instance of C was not eventually followed by D
Returns
------------
filtered_log
Filtered log
"""
if parameters is None:
parameters = {}
if not isinstance(log, EventLog):
log = log_converter.apply(log, variant=log_converter.TO_EVENT_LOG, parameters=parameters)
attribute_key = exec_utils.get_param_value(Parameters.ATTRIBUTE_KEY, parameters, DEFAULT_NAME_KEY)
timestamp_key = exec_utils.get_param_value(Parameters.TIMESTAMP_KEY, parameters, DEFAULT_TIMESTAMP_KEY)
positive = exec_utils.get_param_value(Parameters.POSITIVE, parameters, True)
enable_timestamp = exec_utils.get_param_value(Parameters.ENABLE_TIMESTAMP, parameters, False)
timestamp_diff_boundaries = exec_utils.get_param_value(Parameters.TIMESTAMP_DIFF_BOUNDARIES, parameters, [])
new_log = EventLog(list(), attributes=log.attributes, extensions=log.extensions, classifiers=log.classifiers,
omni_present=log.omni_present, properties=log.properties)
for trace in log:
if enable_timestamp:
occ_A = [trace[i][timestamp_key] for i in range(len(trace)) if
attribute_key in trace[i] and trace[i][attribute_key] == A]
occ_B = [trace[i][timestamp_key] for i in range(len(trace)) if
attribute_key in trace[i] and trace[i][attribute_key] == B]
occ_C = [trace[i][timestamp_key] for i in range(len(trace)) if
attribute_key in trace[i] and trace[i][attribute_key] == C]
occ_D = [trace[i][timestamp_key] for i in range(len(trace)) if
attribute_key in trace[i] and trace[i][attribute_key] == D]
diffs = [[occ_B[j].timestamp() - occ_A[i].timestamp(), occ_C[z].timestamp() - occ_B[j].timestamp(),
occ_D[za].timestamp() - occ_C[z].timestamp()] for i in
range(len(occ_A)) for j in range(len(occ_B))
for z in range(len(occ_C)) for za in range(len(occ_D)) if
occ_B[j] > occ_A[i] and occ_C[z] > occ_B[j] and occ_D[za] > occ_C[z]]
else:
occ_A = [i for i in range(len(trace)) if attribute_key in trace[i] and trace[i][attribute_key] == A]
occ_B = [i for i in range(len(trace)) if attribute_key in trace[i] and trace[i][attribute_key] == B]
occ_C = [i for i in range(len(trace)) if attribute_key in trace[i] and trace[i][attribute_key] == C]
occ_D = [i for i in range(len(trace)) if attribute_key in trace[i] and trace[i][attribute_key] == D]
diffs = [[occ_B[j] - occ_A[i], occ_C[z] - occ_B[j], occ_D[za] - occ_C[z]] for i in range(len(occ_A)) for j
in range(len(occ_B))
for z in range(len(occ_C)) for za in range(len(occ_D)) if
occ_B[j] > occ_A[i] and occ_C[z] > occ_B[j] and occ_D[za] > occ_C[z]]
if enable_timestamp and timestamp_diff_boundaries:
diffs = [d for d in diffs if
timestamp_list_is_ge(d, timestamp_diff_boundaries) and timestamp_list_is_le(d,
timestamp_diff_boundaries)]
if diffs:
if positive:
new_log.append(trace)
elif not positive:
new_log.append(trace)
return new_log
def eventually_follows(log: EventLog, attribute_values: List[str], parameters: Optional[Dict[Union[str, Parameters], Any]] = None) -> EventLog:
"""
Applies the eventually follows rule
Parameters
------------
log
Log
attribute_values
        A list of attribute values; attribute_values[0] must be eventually followed by attribute_values[1], which must be eventually followed by attribute_values[2], and so on up to attribute_values[n]
parameters
Parameters of the algorithm, including the attribute key and the positive parameter:
- If True, returns all the cases containing all attribute_values and in which attribute_values[i] was eventually followed by attribute_values[i + 1]
- If False, returns all the cases not containing all attribute_values, or in which an instance of attribute_values[i] was not eventually
followed by an instance of attribute_values[i + 1]
Returns
------------
filtered_log
Filtered log
"""
if parameters is None:
parameters = {}
attribute_key = exec_utils.get_param_value(Parameters.ATTRIBUTE_KEY, parameters, DEFAULT_NAME_KEY)
timestamp_key = exec_utils.get_param_value(Parameters.TIMESTAMP_KEY, parameters, DEFAULT_TIMESTAMP_KEY)
positive = exec_utils.get_param_value(Parameters.POSITIVE, parameters, True)
enable_timestamp = exec_utils.get_param_value(Parameters.ENABLE_TIMESTAMP, parameters, False)
timestamp_diff_boundaries = exec_utils.get_param_value(Parameters.TIMESTAMP_DIFF_BOUNDARIES, parameters, [])
new_log = EventLog(list(), attributes=log.attributes, extensions=log.extensions, classifiers=log.classifiers,
omni_present=log.omni_present, properties=log.properties)
for trace in log:
if enable_timestamp:
occurrences = [[trace[i][timestamp_key].timestamp() for i in range(len(trace))
if attribute_key in trace[i] and trace[i][attribute_key] == attribute_value] for attribute_value in attribute_values]
else:
occurrences = [[i for i in range(len(trace))
if attribute_key in trace[i] and trace[i][attribute_key] == attribute_value] for attribute_value in attribute_values]
is_good = True
if enable_timestamp and timestamp_diff_boundaries:
prev_min = min(occurrences[0], default=-1)
for i in range(1, len(attribute_values)):
if prev_min == -1 or len(occurrences[i]) == 0:
is_good = False
break
if timestamp_diff_boundaries:
min_diff = timestamp_diff_boundaries[i - 1][0]
max_diff = timestamp_diff_boundaries[i - 1][1]
min_timestamp = min([o for o in occurrences[i] if (o - prev_min) >= min_diff and (o - prev_min) <= max_diff], default=-1)
else:
min_timestamp = min([o for o in occurrences[i] if o >= prev_min], default = -1)
prev_min = min_timestamp
if prev_min == -1:
is_good = False
break
else:
prev_min = min(occurrences[0], default=-1)
for i in range(1, len(attribute_values)):
if prev_min == -1:
is_good = False
break
if len(occurrences[i]) == 0:
is_good = False
break
min_index = min([o for o in occurrences[i] if o >= prev_min], default = -1)
prev_min = min_index
if is_good:
if positive:
new_log.append(trace)
elif not positive:
new_log.append(trace)
return new_log
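# Hedged usage sketch (not part of the library): filter an imported log for cases in
# which "register request" is eventually followed by "pay compensation".
#
#   from pm4py.objects.log.importer.xes import importer as xes_importer
#   log = xes_importer.apply("running-example.xes")   # hypothetical file name
#   filtered = eventually_follows(log, ["register request", "pay compensation"],
#                                 parameters={Parameters.POSITIVE: True})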
def A_next_B_next_C(log: EventLog, A: str, B: str, C: str, parameters: Optional[Dict[Union[str, Parameters], Any]] = None) -> EventLog:
"""
Applies the A next B next C rule
Parameters
------------
log
Log
A
A attribute value
B
B attribute value
C
C attribute value
parameters
Parameters of the algorithm, including the attribute key and the positive parameter:
- If True, returns all the cases containing A, B and C and in which A was directly followed by B and B was directly followed by C
        - If False, returns all the cases not containing A or B or C, or in which no instance of A was directly
followed by an instance of B and B was directly followed by C
Returns
------------
filtered_log
Filtered log
"""
if parameters is None:
parameters = {}
if not isinstance(log, EventLog):
log = log_converter.apply(log, variant=log_converter.TO_EVENT_LOG, parameters=parameters)
attribute_key = exec_utils.get_param_value(Parameters.ATTRIBUTE_KEY, parameters, DEFAULT_NAME_KEY)
positive = exec_utils.get_param_value(Parameters.POSITIVE, parameters, True)
new_log = EventLog(list(), attributes=log.attributes, extensions=log.extensions, classifiers=log.classifiers,
omni_present=log.omni_present, properties=log.properties)
for trace in log:
occ_A = [i for i in range(len(trace)) if attribute_key in trace[i] and trace[i][attribute_key] == A]
occ_B = [i for i in range(len(trace)) if attribute_key in trace[i] and trace[i][attribute_key] == B]
occ_C = [i for i in range(len(trace)) if attribute_key in trace[i] and trace[i][attribute_key] == C]
found = False
for a in occ_A:
for b in occ_B:
for c in occ_C:
if (b - a) == 1 and (c - b) == 1:
found = True
if found:
if positive:
new_log.append(trace)
elif not positive:
new_log.append(trace)
return new_log
def four_eyes_principle(log: EventLog, A: str, B: str, parameters: Optional[Dict[Union[str, Parameters], Any]] = None) -> EventLog:
"""
Verifies the Four Eyes Principle given A and B
Parameters
-------------
log
Log
A
A attribute value
B
B attribute value
parameters
Parameters of the algorithm, including the attribute key and the positive parameter:
- if True, then filters all the cases containing A and B which have empty intersection between the set
of resources doing A and B
- if False, then filters all the cases containing A and B which have no empty intersection between the set
of resources doing A and B
Returns
--------------
filtered_log
Filtered log
"""
if parameters is None:
parameters = {}
if not isinstance(log, EventLog):
log = log_converter.apply(log, variant=log_converter.TO_EVENT_LOG, parameters=parameters)
attribute_key = exec_utils.get_param_value(Parameters.ATTRIBUTE_KEY, parameters, DEFAULT_NAME_KEY)
resource_key = exec_utils.get_param_value(Parameters.RESOURCE_KEY, parameters, DEFAULT_RESOURCE_KEY)
positive = exec_utils.get_param_value(Parameters.POSITIVE, parameters, True)
new_log = EventLog(list(), attributes=log.attributes, extensions=log.extensions, classifiers=log.classifiers,
omni_present=log.omni_present, properties=log.properties)
for trace in log:
occ_A = set([trace[i][resource_key] for i in range(len(trace)) if
attribute_key in trace[i] and resource_key in trace[i] and trace[i][attribute_key] == A])
occ_B = set([trace[i][resource_key] for i in range(len(trace)) if
attribute_key in trace[i] and resource_key in trace[i] and trace[i][attribute_key] == B])
if len(occ_A) > 0 and len(occ_B) > 0:
inte = occ_A.intersection(occ_B)
if not positive and len(inte) > 0:
new_log.append(trace)
elif positive and len(inte) == 0:
new_log.append(trace)
return new_log
def attr_value_different_persons(log: EventLog, A: str, parameters: Optional[Dict[Union[str, Parameters], Any]] = None) -> EventLog:
"""
Checks whether an attribute value is assumed on events done by different resources
Parameters
------------
log
Log
A
A attribute value
parameters
Parameters of the algorithm, including the attribute key and the positive parameter:
- if True, then filters all the cases containing occurrences of A done by different resources
- if False, then filters all the cases not containing occurrences of A done by different resources
Returns
-------------
filtered_log
Filtered log
"""
if parameters is None:
parameters = {}
if not isinstance(log, EventLog):
log = log_converter.apply(log, variant=log_converter.TO_EVENT_LOG, parameters=parameters)
attribute_key = exec_utils.get_param_value(Parameters.ATTRIBUTE_KEY, parameters, DEFAULT_NAME_KEY)
resource_key = exec_utils.get_param_value(Parameters.RESOURCE_KEY, parameters, DEFAULT_RESOURCE_KEY)
positive = exec_utils.get_param_value(Parameters.POSITIVE, parameters, True)
new_log = EventLog(list(), attributes=log.attributes, extensions=log.extensions, classifiers=log.classifiers,
omni_present=log.omni_present, properties=log.properties)
for trace in log:
occ_A = set([trace[i][resource_key] for i in range(len(trace)) if
attribute_key in trace[i] and resource_key in trace[i] and trace[i][attribute_key] == A])
if len(occ_A) > 1:
if positive:
new_log.append(trace)
elif not positive:
new_log.append(trace)
return new_log
| 2.21875
| 2
|
app/schemas/user.py
|
Gingernaut/microAuth
| 28
|
12777822
|
<filename>app/schemas/user.py
from pydantic import BaseModel, Schema
from uuid import UUID
from config import get_config
from datetime import datetime
app_config = get_config()
class UserBase(BaseModel):
firstName: str = None
lastName: str = None
emailAddress: str
phoneNumber: str = None
class UserCreate(UserBase):
emailAddress: str
password: str = Schema(..., min_length=app_config.MIN_PASS_LENGTH)
firstName: str = None
lastName: str = None
phoneNumber: str = None
class User(UserBase):
id: int
createdTime: datetime
modifiedTime: datetime
UUID: UUID
isVerified: bool
userRole: str
class Config:
orm_mode = True
# All optional fields that can be updated via PUT
class UserUpdate(UserBase):
emailAddress: str = None
isVerified: bool = None
userRole: str = None
password: str = Schema(None, min_length=app_config.MIN_PASS_LENGTH)
class Config:
orm_mode = True
class LoggedInUser(User):
jwt: str
class Config:
orm_mode = True
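
# Hedged usage sketch (not part of the original module): UserCreate enforces the
# configured minimum password length at parse time.
#
#   payload = {"emailAddress": "user@example.com", "password": "a-long-enough-pass"}
#   user_in = UserCreate(**payload)   # raises a pydantic ValidationError if too short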
| 2.625
| 3
|
joueur/utilities.py
|
amaag/mmai-pirates
| 4
|
12777823
|
<reponame>amaag/mmai-pirates
import re
first_cap_re = re.compile('(.)([A-Z][a-z]+)')
all_cap_re = re.compile('([a-z0-9])([A-Z])')
def camel_case_converter(name):
    """Convert a camelCase or PascalCase identifier to snake_case."""
    s1 = first_cap_re.sub(r'\1_\2', name)
    return all_cap_re.sub(r'\1_\2', s1).lower()
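
# Illustrative usage sketch (not part of the original module): the two regexes split
# transitions such as "HTTPResponse" -> "http_response" and
# "gameObjectName" -> "game_object_name".
if __name__ == "__main__":
    for sample in ("gameObjectName", "HTTPResponse", "already_snake"):
        print(sample, "->", camel_case_converter(sample))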
| 2.703125
| 3
|
test_chocolate.py
|
byungsook/neural-flow-style
| 93
|
12777824
|
<gh_stars>10-100
#############################################################
# MIT License, Copyright © 2020, ETH Zurich, <NAME>
#############################################################
import numpy as np
import tensorflow as tf
import os
from tqdm import trange
from config import get_config
from util import *
from styler_3p import Styler
import sys
sys.path.append('E:/partio/build/py/Release')
import partio
def run(config):
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # so the IDs match nvidia-smi
os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu_id # "0, 1" for multiple
prepare_dirs_and_logger(config)
tf.compat.v1.set_random_seed(config.seed)
config.rng = np.random.RandomState(config.seed)
styler = Styler(config)
styler.load_img(config.resolution[1:])
params = {}
# load particles
nmin, nmax = np.iinfo(np.int32).max, 0
for i in range(config.num_frames):
pt_path = os.path.join(config.data_dir, config.dataset, config.d_path % (config.target_frame+i))
pt = partio.read(pt_path)
p_num = pt.numParticles()
nmin = min(p_num, nmin)
nmax = max(p_num, nmax)
print('# range:', nmin, nmax)
p = []
# r = []
for i in trange(config.num_frames, desc='load particle'): # last one for mask
pt_path = os.path.join(config.data_dir, config.dataset, config.d_path % (config.target_frame+i))
pt = partio.read(pt_path)
p_attr_id = pt.attributeInfo('id')
p_attr_pos = pt.attributeInfo('position')
# p_attr_den = pt.attributeInfo('density')
p_ = np.ones([nmax,3], dtype=np.float32)*-1
# r_ = np.zeros([nmax,1], dtype=np.float32)
p_num = pt.numParticles()
for j in range(p_num):
p_id_j = pt.get(p_attr_id, j)[0]
p_[p_id_j] = pt.get(p_attr_pos, p_id_j)
# r_[p_id_j] = pt.get(p_attr_den, p_id_j)
# r.append(r_)
# normalize particle position [0-1]
px, py, pz = p_[...,0], p_[...,1], p_[...,2]
px /= config.domain[2]
py /= config.domain[1]
pz /= config.domain[0]
p_ = np.stack([pz,py,px], axis=-1)
p.append(p_)
print('resolution:', config.resolution)
print('domain:', config.domain)
print('radius:', config.radius)
print('normalized px range', px.min(), px.max())
print('normalized py range', py.min(), py.max())
params['p'] = p
# styler.render_test(params)
result = styler.run(params)
# save loss plot
l = result['l']
lb = []
for o, l_ in enumerate(l):
lb_, = plt.plot(range(len(l_)), l_, label='oct %d' % o)
lb.append(lb_)
plt.legend(handles=lb)
# plt.show()
plot_path = os.path.join(config.log_dir, 'loss_plot.png')
plt.savefig(plot_path)
# save particle (load using Houdini GPlay)
p_sty = result['p']
p = []
# v_sty = result['v']
# v = []
for i in range(config.num_frames):
# denormalize particle positions
px, py, pz = p_sty[i][...,2], p_sty[i][...,1], p_sty[i][...,0]
px *= config.domain[2]
py *= config.domain[1]
pz *= config.domain[0]
p_sty_ = np.stack([px,py,pz], axis=-1)
p.append(p_sty_)
# # denormalize particle displacement for stylization
# vx, vy, vz = v_sty[i][...,2], v_sty[i][...,1], v_sty[i][...,0]
# vx *= config.domain[2]
# vy *= config.domain[1]
# vz *= config.domain[0]
# v_sty_ = np.stack([vx,vy,vz], axis=-1)
# v.append(v_sty_)
# create a particle set and attributes
pt = partio.create()
position = pt.addAttribute("position",partio.VECTOR,3)
# color = pt.addAttribute("Cd",partio.FLOAT,3)
radius = pt.addAttribute("radius",partio.FLOAT,1)
# normal = pt.addAttribute("normal",partio.VECTOR,3)
for p_sty_i in p_sty_:
if p_sty_i[0] < 0: continue
p_ = pt.addParticle()
            pt.set(position, p_, tuple(p_sty_i.astype(float)))  # np.float is deprecated; use the builtin float
pt.set(radius, p_, (config.radius,))
p_path = os.path.join(config.log_dir, '%03d.bgeo' % (config.target_frame+i))
partio.write(p_path, pt)
r_sty = result['r']
for i, r_sty_ in enumerate(r_sty):
im = Image.fromarray(r_sty_)
d_path = os.path.join(config.log_dir, '%03d.png' % (config.target_frame+i))
im.save(d_path)
d_intm = result['d_intm']
for o, d_intm_o in enumerate(d_intm):
for i, d_intm_ in enumerate(d_intm_o):
if d_intm_ is None: continue
im = Image.fromarray(d_intm_)
d_path = os.path.join(config.log_dir, 'o%02d_%03d.png' % (o, config.target_frame+i))
im.save(d_path)
# visualization using open3d
bbox = [
[0,0,0],
[config.domain[2],config.domain[1],config.domain[0]], # [X,Y,Z]
]
draw_pt(p, bbox=bbox, dt=0.1, is_2d=False) # pv=v,
def main(config):
config.dataset = 'chocolate'
config.d_path = 'partio/ParticleData_Fluid_%d.bgeo'
# from scene
config.radius = 0.025
config.support = 4
config.disc = 2 # 1 or 2
config.rest_density = 1000
config.resolution = [128,128,128] # original resolution, # [D,H,W]
cell_size = 2*config.radius*config.disc
config.domain = [float(_*cell_size) for _ in config.resolution] # [D,H,W]
config.nsize = max(3-config.disc,1) # 1 is enough if disc is 2, 2 if disc is 1
# upscaling for rendering
config.resolution = [200,200,200]
# default settings
config.lr = 0.002
config.iter = 20
config.resize_scale = 1
config.transmit = 0.2 # 0.01, 1
config.clip = False # ignore particles outside of domain
config.num_kernels = 1
config.k = 3
config.network = 'tensorflow_inception_graph.pb'
config.octave_n = 2
config.octave_scale = 1.8
config.render_liquid = True
config.rotate = False
config.style_layer = ['conv2d2','mixed3b','mixed4b']
config.w_style_layer = [1,1,1]
#####################
# frame range setting
config.frames_per_opt = 120
config.batch_size = 1
config.window_sigma = 9
# multi_frame = True
# if multi_frame:
# config.target_frame = 1
# config.num_frames = 120
# config.interp = 1
# ######
# # interpolation test
# interpolate = False
# if interpolate:
# config.interp = 5
# n = (config.num_frames-1)//config.interp
# config.num_frames = n*config.interp + 1
# assert (config.num_frames - 1) % config.interp == 0
# #####
#
# else:
# config.target_frame = 90
# config.num_frames = 1
# config.interp = 1
######
# position test
config.target_field = 'p'
semantic = False
pressure = False
# if semantic:
# config.w_style = 0
# config.w_content = 1
# config.content_layer = 'mixed3b_3x3_bottleneck_pre_relu'
# config.content_channel = 44 # net
# else:
# # style
# config.w_style = 1
# config.w_content = 0
# style_list = {
# 'spiral': 'pattern1.png',
# 'fire_new': 'fire_new.jpg',
# 'ben_giles': 'ben_giles.png',
# 'wave': 'wave.jpeg',
# }
# style = 'spiral'
# config.style_target = os.path.join(config.data_dir, 'image', style_list[style])
# pressure test
if pressure:
config.w_pressure = 1e12 # 1e10 ~ 1e12
#####
if config.w_content == 1:
config.tag = 'test_%s_%s_%d' % (
config.target_field, config.content_layer, config.content_channel)
else:
style = os.path.splitext(os.path.basename(config.style_target))[0]
config.tag = 'test_%s_%s' % (
config.target_field, style)
config.tag += '_%d_intp%d' % (config.num_frames, config.interp)
quick_test = False
if quick_test:
config.scale = 1
config.iter = 0
config.octave_n = 1
run(config)
if __name__ == "__main__":
config, unparsed = get_config()
main(config)
| 1.84375
| 2
|
Session 7 - Python/WagerWarCardGame/Development/WWOverlord.py
|
dbowmans46/PracticalProgramming
| 0
|
12777825
|
<reponame>dbowmans46/PracticalProgramming<filename>Session 7 - Python/WagerWarCardGame/Development/WWOverlord.py
# -*- coding: utf-8 -*-
"""
LICENSE (MIT License):
Copyright 2018 <NAME>, <NAME>, and <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import sys
from PyQt5 import QtCore
from PyQt5.QtWidgets import QDialog, QApplication
from WWSetupWindow import WWSetupWindow
from WWWarConstants import WWWarConstants
from WWGameManager import WWGameManager
from WWGameWindow import WWGameWindow
from WWVictoryWindow import WWVictoryWindow
from WWDataLogger import WWDataLogger
from WWCardsDeck import WWCardsDeck
from WWShuffleDeck import WWShuffleDeck
if __name__ == "__main__":
wwgm = WWGameManager()
const = WWWarConstants()
app = QApplication(sys.argv)
WWDataLogger.deleteLogger() # this will delete the "game_log.json" before running new game
while (WWGameManager.playAgainToggle):
WWGameManager.playAgainToggle = False
wwsw = WWSetupWindow()
wwsw.setupUi()
while wwsw.wwswIsActive == True:
QtCore.QCoreApplication.processEvents()
QtCore.QCoreApplication.exit()
wwsw.Dialog.close()
wwgw = WWGameWindow()
wwgw.setTheStage()
while wwgw.wwgwIsActive == True:
QtCore.QCoreApplication.processEvents()
QtCore.QCoreApplication.exit()
wwgw.MainWindow.close()
wwvw = WWVictoryWindow()
wwvw.setTheStage()
while wwvw.wwvwIsActive == True:
QtCore.QCoreApplication.processEvents()
QtCore.QCoreApplication.exit()
wwvw.MainWindow.close()
# Reset game manager values when playing again
if WWGameManager.playAgainToggle:
'''
@ brief Dealing deck, used to create all players decks
@ param str-list
'''
WWGameManager.gameDeck = WWCardsDeck([])
'''
@ brief Deck holding all wagers
@ param WWCardsDeck
'''
WWGameManager.wagerDeck = WWCardsDeck([])
'''
@ brief Deck that computer plays from
@ param WWCardsDeck
'''
WWGameManager.computerDeck = WWCardsDeck([])
'''
@ brief Computers winnings, After each turn, will be used again
@ param WWCardsDeck
'''
WWGameManager.computerGraveyardDeck = WWShuffleDeck([])
'''
@ brief computer's card being used in war. does not include wagers
@ param WWCardsDeck
'''
WWGameManager.computerBattleDeck = WWCardsDeck([])
'''
@ brief Deck that player plays from
@ param WWCardsDeck
'''
WWGameManager.playerDeck = WWCardsDeck([])
'''
@ brief players winnings, After each turn, will be used again
@ param WWCardsDeck
'''
WWGameManager.playerGraveyardDeck = WWShuffleDeck([])
'''
@ brief player's card being used in war. does not include wagers
@ param WWCardsDeck
'''
WWGameManager.playerBattleDeck = WWCardsDeck([])
WWGameManager.winnerName = ""
WWGameManager.warCount = 0
sys.exit()
| 1.992188
| 2
|
api/utils.py
|
jdeepe/Dynamodb-ORM-Demo
| 0
|
12777826
|
<gh_stars>0
from pynamodb.attributes import ListAttribute, MapAttribute, NumberAttribute
class ModelIterator:
def __iter__(self):
for name, attr in self.get_attributes().items():
            # single if/elif chain so each attribute is yielded exactly once
            if isinstance(attr, MapAttribute):
                yield name, getattr(self, name).as_dict()
            elif isinstance(attr, ListAttribute):
                results = []
                for el in getattr(self, name):
                    if isinstance(el, MapAttribute):
                        results.append(el.as_dict())
                    else:
                        results.append(el)
                yield name, results
            elif isinstance(attr, NumberAttribute):
                yield name, getattr(self, name)
            else:
                yield name, attr.serialize(getattr(self, name))
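
# Hedged usage sketch (not from this repo): ModelIterator is meant to be mixed into a
# pynamodb Model so that dict(instance) works. The model below is hypothetical.
#
#   from pynamodb.models import Model
#   from pynamodb.attributes import UnicodeAttribute
#
#   class Order(Model, ModelIterator):
#       class Meta:
#           table_name = "orders"
#       order_id = UnicodeAttribute(hash_key=True)
#       total = NumberAttribute(default=0)
#
#   order = Order(order_id="o-1", total=42)
#   print(dict(order))   # {'order_id': 'o-1', 'total': 42}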
| 2.765625
| 3
|
cmsplugin_newsplus/menu.py
|
nimbis/cmsplugin-newsplus
| 6
|
12777827
|
<reponame>nimbis/cmsplugin-newsplus
"""
This module hooks into django-cms' menu system by providing a clear menu
hierarchy for every news item.
"""
from django.utils.translation import ugettext_lazy as _
from menus.menu_pool import menu_pool
from cms.menu_bases import CMSAttachMenu
from . import navigation
class NewsItemMenu(CMSAttachMenu):
name = _("News menu")
def get_nodes(self, request):
return navigation.get_nodes(request)
menu_pool.register_menu(NewsItemMenu)
| 1.726563
| 2
|
detection/migrations/0009_auto_20210128_0853.py
|
Kunal614/Pocket-Medical
| 1
|
12777828
|
<gh_stars>1-10
# Generated by Django 3.0.7 on 2021-01-28 08:53
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('detection', '0008_auto_20210127_2043'),
]
operations = [
migrations.RenameField(
model_name='medical_record',
old_name='hospital_name',
new_name='hospital',
),
]
| 1.71875
| 2
|
netaudio/__init__.py
|
chris-ritsen/network-audio-controller
| 22
|
12777829
|
<filename>netaudio/__init__.py<gh_stars>10-100
import sys
from .dante.browser import DanteBrowser
from .dante.channel import DanteChannel
from .dante.control import DanteControl
from .dante.device import DanteDevice
from .dante.multicast import DanteMulticast
from .dante.subscription import DanteSubscription
from .console.application import main
__author__ = "<NAME>"
__maintainer__ = "<NAME> <<EMAIL>>"
if sys.version_info < (3, 9):
    raise ImportError("Python 3.9 or newer is required.")
| 1.734375
| 2
|
tests/web_platform/css_flexbox_1/test_flexbox_flex_1_1_0_unitless.py
|
fletchgraham/colosseum
| 0
|
12777830
|
from tests.utils import W3CTestCase
class TestFlexbox_Flex110Unitless(W3CTestCase):
vars().update(W3CTestCase.find_tests(__file__, 'flexbox_flex-1-1-0-unitless'))
| 1.476563
| 1
|
cogs/voice.py
|
KacperKotlewski/discord-bot-learning.py
| 0
|
12777831
|
<reponame>KacperKotlewski/discord-bot-learning.py
import asyncio
import discord
import youtube_dl
from discord.ext import commands
# Suppress noise about console usage from errors
youtube_dl.utils.bug_reports_message = lambda: ''
ytdl_format_options = {
'format': 'bestaudio/best',
'outtmpl': '%(extractor)s-%(id)s-%(title)s.%(ext)s',
'restrictfilenames': True,
'noplaylist': True,
'nocheckcertificate': True,
'ignoreerrors': False,
'logtostderr': False,
'quiet': True,
'no_warnings': True,
'default_search': 'auto',
'source_address': '0.0.0.0' # bind to ipv4 since ipv6 addresses cause issues sometimes
}
ffmpeg_options = {
'options': '-vn'
}
ytdl = youtube_dl.YoutubeDL(ytdl_format_options)
class YTDLSource(discord.PCMVolumeTransformer):
def __init__(self, source, *, data, volume=0.5):
super().__init__(source, volume)
self.data = data
self.title = data.get('title')
self.url = data.get('url')
@classmethod
async def from_url(cls, url, *, loop=None, stream=False):
loop = loop or asyncio.get_event_loop()
data = await loop.run_in_executor(None, lambda: ytdl.extract_info(url, download=not stream))
if 'entries' in data:
# take first item from a playlist
data = data['entries'][0]
filename = data['url'] if stream else ytdl.prepare_filename(data)
return cls(discord.FFmpegPCMAudio(filename, **ffmpeg_options), data=data)
class Voice(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command(invoke_without_subcommand=True)
async def join(self, ctx: commands.Context):
channel = ctx.author.voice.channel
await channel.connect()
@commands.command()
async def leave(self, ctx: commands.Context):
for con in self.client.voice_clients:
if con.channel.id == ctx.author.voice.channel.id:
await con.disconnect(force=False)
def check_voice_chat_connection(self, ctx: commands.Context):
return ctx.author.voice.channel.id in [c.channel.id for c in self.client.voice_clients]
@commands.command()
async def play(self, ctx: commands.Context, *, url= 'https://www.youtube.com/watch?v=dQw4w9WgXcQ'):
ctxChannel = ctx.author.voice.channel
if not self.check_voice_chat_connection(ctx):
await ctx.invoke(self.join)
"""
async with ctx.typing():
with ([c for c in self.client.voice_clients if c.channel.id == ctxChannel.id][0]) as voice_client:
source = discord.FFmpegPCMAudio('music.mp3')
voice_client.play(source, after=lambda e: print('done', e))
voice_client.start()
#"""
async with ctx.typing():
player = await YTDLSource.from_url(url, loop=self.client.loop)
ctx.voice_client.play(player, after=lambda e: print('Player error: %s' % e) if e else None)
await ctx.send('Now playing: {}'.format(player.title))
@commands.command()
async def stop(self, ctx: commands.Context):
if self.check_voice_chat_connection(ctx):
ctxChannel = ctx.author.voice.channel
([c for c in self.client.voice_clients if c.channel.id == ctxChannel.id][0]).stop()
await ctx.invoke(self.leave)
@commands.command()
async def pause(self, ctx:commands.Context):
if self.check_voice_chat_connection(ctx):
ctxChannel = ctx.author.voice.channel
([c for c in self.client.voice_clients if c.channel.id == ctxChannel.id][0]).pause()
@commands.command()
async def resume(self, ctx:commands.Context):
if self.check_voice_chat_connection(ctx):
ctxChannel = ctx.author.voice.channel
([c for c in self.client.voice_clients if c.channel.id == ctxChannel.id][0]).resume()
@commands.command()
async def playing(self, ctx: commands.Context):
        channel_id = ctx.author.voice.channel.id
        vc = None
        for con in self.client.voice_clients:
            if con.channel.id == channel_id:
                vc = con
        if vc is None:
            await ctx.send("Not connected to your voice channel.")
            return
        await ctx.send(vc.is_playing())
def setup(client):
client.add_cog(Voice(client))
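
# Hedged usage sketch (not part of the original cog): in the bot entry point the
# extension would typically be loaded with something like
#   client.load_extension('cogs.voice')
# which invokes setup(client) above.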
| 2.5625
| 3
|
Feed Forward Network.py
|
yash1802/Recognizing-Objects-in-Photographs
| 0
|
12777832
|
<reponame>yash1802/Recognizing-Objects-in-Photographs<gh_stars>0
from kaggle import CIFAR10
from pybrain.datasets import ClassificationDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure.modules import LinearLayer
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.utilities import percentError
train = CIFAR10('train', datapath='/home/jvujjini/Desktop/CIFAR/')
dataSet = ClassificationDataSet(train.images.shape[1], nb_classes=train.n_classes, class_labels=train.label_names)
for image, image_class in zip(train.images,train.y):
dataSet.appendLinked(image, image_class.nonzero()[0])
dataSet._convertToOneOfMany( )
print "\nTraining"
feedForwardNetwork = buildNetwork(dataSet.indim, 5, dataSet.outdim, outclass=LinearLayer)
classifier = BackpropTrainer(feedForwardNetwork, dataset=dataSet, momentum=0.1, verbose=True, weightdecay=0.01)
for i in range(5):
classifier.trainEpochs(1)
result = percentError(classifier.testOnClassData(),dataSet['class'])
print "epoch# %4d" % classifier.totalepochs, " train error: %5.2f%%" % result
| 2.84375
| 3
|
Day 15/OneStringNoTrouble.py
|
sandeep-krishna/100DaysOfCode
| 0
|
12777833
|
'''
One String No Trouble
A string is called a good string if and only if no two consecutive letters are the same. For example, "ab" is good while "aa" is not.
You are given a string S. Among all the good substrings of S, print the size of the longest one.
Input format
A single line that contains a string S.
Output format
Print an integer that denotes the size of the longest good substring of S.
SAMPLE INPUT
ab
SAMPLE OUTPUT
2
Explanation
The complete string is good, so the answer is 2.
'''
s = input()
count = 1
ans = []
for i in range(len(s)-1):
if s[i]!=s[i+1]:
count+=1
elif (s[i]==s[i+1]):
ans.append(count)
count = 1
print(max(max(ans),count)) if ans else print(count)
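
# Worked example (illustrative): for s = "aabcc" the runs without equal adjacent
# letters are "a", "abc" and "c"; the counts collected in ans are [1, 3] and the
# final count is 1, so max(max(ans), count) prints 3.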
| 4.0625
| 4
|
setup.py
|
manojrege/pynetem
| 1
|
12777834
|
import setuptools
setuptools.setup(
name='pynetem',
version='0.1',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/manojrege/pynetem',
description='A Python wrapper library for network emulation on MacOS',
long_description=open('README.md').read(),
license=open('LICENSE').read(),
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"LICENSE :: OSI APPROVED :: BSD LICENSE",
],
package_data = {'pynetem': ['data/dummynet.conf.j2']},
include_package_data = True,
)
| 1.21875
| 1
|
visionseed/YtDataLink.py
|
BrickZhaotzh/yt-visionseed-sdk-python
| 1
|
12777835
|
# /**
# * Parse the YtDataLink protocol
# * author: chenliang @ Youtu Lab, Tencent
# * @example
# const SerialPort = require('serialport')
# const YtMsgParser = require('@api/YtMsgParser')
# const port = new SerialPort('/dev/ttyUSB0')
# const parser = port.pipe(new YtMsgParser())
# parser.on('data', console.log)
# */
from .YtMsg_pb2 import *
class YtDataLink:
class YtDataLinkStatus:
YT_DL_IDLE = 0
YT_DL_LEN1_PENDING = 1
YT_DL_LEN2_PENDING = 2
YT_DL_LEN3_PENDING = 3
YT_DL_LEN_CRC_H = 4
YT_DL_LEN_CRC_L = 5
YT_DL_DATA = 6
YT_DL_CRC_H = 7
YT_DL_CRC_L = 8
SOF = 0x10
TRANS = 0x11
ytMsgSize = 2097152
ccittTable = [
0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50A5, 0x60C6, 0x70E7,
0x8108, 0x9129, 0xA14A, 0xB16B, 0xC18C, 0xD1AD, 0xE1CE, 0xF1EF,
0x1231, 0x0210, 0x3273, 0x2252, 0x52B5, 0x4294, 0x72F7, 0x62D6,
0x9339, 0x8318, 0xB37B, 0xA35A, 0xD3BD, 0xC39C, 0xF3FF, 0xE3DE,
0x2462, 0x3443, 0x0420, 0x1401, 0x64E6, 0x74C7, 0x44A4, 0x5485,
0xA56A, 0xB54B, 0x8528, 0x9509, 0xE5EE, 0xF5CF, 0xC5AC, 0xD58D,
0x3653, 0x2672, 0x1611, 0x0630, 0x76D7, 0x66F6, 0x5695, 0x46B4,
0xB75B, 0xA77A, 0x9719, 0x8738, 0xF7DF, 0xE7FE, 0xD79D, 0xC7BC,
0x48C4, 0x58E5, 0x6886, 0x78A7, 0x0840, 0x1861, 0x2802, 0x3823,
0xC9CC, 0xD9ED, 0xE98E, 0xF9AF, 0x8948, 0x9969, 0xA90A, 0xB92B,
0x5AF5, 0x4AD4, 0x7AB7, 0x6A96, 0x1A71, 0x0A50, 0x3A33, 0x2A12,
0xDBFD, 0xCBDC, 0xFBBF, 0xEB9E, 0x9B79, 0x8B58, 0xBB3B, 0xAB1A,
0x6CA6, 0x7C87, 0x4CE4, 0x5CC5, 0x2C22, 0x3C03, 0x0C60, 0x1C41,
0xEDAE, 0xFD8F, 0xCDEC, 0xDDCD, 0xAD2A, 0xBD0B, 0x8D68, 0x9D49,
0x7E97, 0x6EB6, 0x5ED5, 0x4EF4, 0x3E13, 0x2E32, 0x1E51, 0x0E70,
0xFF9F, 0xEFBE, 0xDFDD, 0xCFFC, 0xBF1B, 0xAF3A, 0x9F59, 0x8F78,
0x9188, 0x81A9, 0xB1CA, 0xA1EB, 0xD10C, 0xC12D, 0xF14E, 0xE16F,
0x1080, 0x00A1, 0x30C2, 0x20E3, 0x5004, 0x4025, 0x7046, 0x6067,
0x83B9, 0x9398, 0xA3FB, 0xB3DA, 0xC33D, 0xD31C, 0xE37F, 0xF35E,
0x02B1, 0x1290, 0x22F3, 0x32D2, 0x4235, 0x5214, 0x6277, 0x7256,
0xB5EA, 0xA5CB, 0x95A8, 0x8589, 0xF56E, 0xE54F, 0xD52C, 0xC50D,
0x34E2, 0x24C3, 0x14A0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
0xA7DB, 0xB7FA, 0x8799, 0x97B8, 0xE75F, 0xF77E, 0xC71D, 0xD73C,
0x26D3, 0x36F2, 0x0691, 0x16B0, 0x6657, 0x7676, 0x4615, 0x5634,
0xD94C, 0xC96D, 0xF90E, 0xE92F, 0x99C8, 0x89E9, 0xB98A, 0xA9AB,
0x5844, 0x4865, 0x7806, 0x6827, 0x18C0, 0x08E1, 0x3882, 0x28A3,
0xCB7D, 0xDB5C, 0xEB3F, 0xFB1E, 0x8BF9, 0x9BD8, 0xABBB, 0xBB9A,
0x4A75, 0x5A54, 0x6A37, 0x7A16, 0x0AF1, 0x1AD0, 0x2AB3, 0x3A92,
0xFD2E, 0xED0F, 0xDD6C, 0xCD4D, 0xBDAA, 0xAD8B, 0x9DE8, 0x8DC9,
0x7C26, 0x6C07, 0x5C64, 0x4C45, 0x3CA2, 0x2C83, 0x1CE0, 0x0CC1,
0xEF1F, 0xFF3E, 0xCF5D, 0xDF7C, 0xAF9B, 0xBFBA, 0x8FD9, 0x9FF8,
0x6E17, 0x7E36, 0x4E55, 0x5E74, 0x2E93, 0x3EB2, 0x0ED1, 0x1EF0
]
def __init__(self, port):
self.array = bytearray(0)
self.cursor = 0
self.mStatus = self.YtDataLinkStatus.YT_DL_IDLE
self.mMsgLen = 0
self.mCrc = 0
self.mCrcCalc = 0xffff
self.mTrans = False
self.mCrcSendCalc = 0xffff
self.port = port
def crcUpdate (self, ch, first):
if (first):
self.mCrcCalc = 0xffff
self.mCrcCalc = self.ccittTable[(self.mCrcCalc >> 8 ^ ch) & 0xff] ^ ((self.mCrcCalc << 8) & 0xffff)
def toHex (self, d):
return format(d, '02X')
def printBuf (self, buf):
print(''.join('{:02x} '.format(x) for x in buf))
def recvRunOnce(self):
if len(self.array) < 10:
buf = self.port.read(16)
self.array += buf
while len(self.array) > 0:
ch = self.array.pop(0)
# print('loop=', self.toHex(ch))
if (ch == self.SOF):
if (self.mStatus != self.YtDataLinkStatus.YT_DL_IDLE):
                    print('[YtMsg] unfinished pkg(%d/%d)' % (self.mBufi, self.mMsgLen))
self.mStatus = self.YtDataLinkStatus.YT_DL_LEN1_PENDING
elif (ch == self.TRANS):
self.mTrans = True
else:
                # // after escaping, SOF and TRANS bytes never appear in the payload
if (self.mTrans):
ch = ch ^ self.TRANS
self.mTrans = False
# if (self.mStatus == self.YtDataLinkStatus.YT_DL_IDLE):
if (self.mStatus == self.YtDataLinkStatus.YT_DL_LEN1_PENDING):
# print('[YtMsg] begin\n')
self.mStatus = self.YtDataLinkStatus.YT_DL_LEN1_PENDING
self.mMsgLen = 0
self.mCrc = 0
# //falls through
if (self.mStatus == self.YtDataLinkStatus.YT_DL_LEN1_PENDING or
self.mStatus == self.YtDataLinkStatus.YT_DL_LEN2_PENDING or
self.mStatus == self.YtDataLinkStatus.YT_DL_LEN3_PENDING):
# // console.log('[YtMsg]', self.toHex(ch))
self.crcUpdate(ch, self.mStatus == self.YtDataLinkStatus.YT_DL_LEN1_PENDING)
self.mMsgLen = (self.mMsgLen << 8) | ch
if (self.mStatus == self.YtDataLinkStatus.YT_DL_LEN3_PENDING):
# print('len=', self.mMsgLen)
if (self.mMsgLen > self.ytMsgSize):
# console.log('[YtMsg] Error: msg len %d > %d\n', self.mMsgLen, ytMsgSize)
self.mStatus = self.YtDataLinkStatus.YT_DL_IDLE
continue
self.mStatus = self.mStatus + 1
continue
if (self.mStatus == self.YtDataLinkStatus.YT_DL_LEN_CRC_H):
self.mCrc = (self.mCrc << 8) | ch
self.mStatus = self.mStatus + 1
continue
if (self.mStatus == self.YtDataLinkStatus.YT_DL_LEN_CRC_L):
self.mCrc = (self.mCrc << 8) | ch
if ((self.mCrcCalc) != self.mCrc):
                        print('[YtMsg] Error: msg len crc 0x%s != 0x%s' % (self.toHex(self.mCrcCalc), self.toHex(self.mCrc)))
self.mStatus = self.YtDataLinkStatus.YT_DL_IDLE
continue
self.mStatus = self.mStatus + 1
self.mBuf = bytearray(self.mMsgLen)
self.mBufi = 0
# Brust read
# print('[YtMsg] Brust read', self.mMsgLen)
self.array += self.port.read(self.mMsgLen)
continue
if (self.mStatus == self.YtDataLinkStatus.YT_DL_DATA):
self.crcUpdate(ch, self.mBufi == 0)
self.mBuf[self.mBufi] = ch
self.mBufi += 1
if (self.mBufi == self.mMsgLen):
self.mStatus = self.mStatus + 1
continue
if (self.mStatus == self.YtDataLinkStatus.YT_DL_CRC_H):
self.mCrc = 0
# //falls through
if (self.mStatus == self.YtDataLinkStatus.YT_DL_CRC_H or
self.mStatus == self.YtDataLinkStatus.YT_DL_CRC_L):
self.mCrc = (self.mCrc << 8) | ch
if (self.mStatus == self.YtDataLinkStatus.YT_DL_CRC_L):
if ((self.mCrcCalc) != self.mCrc):
                            print('[YtMsg] Error: msg crc 0x%s != 0x%s' % (self.toHex(self.mCrcCalc), self.toHex(self.mCrc)))
self.mStatus = self.YtDataLinkStatus.YT_DL_IDLE
continue
self.mStatus = self.YtDataLinkStatus.YT_DL_IDLE
target = YtMsg()
target.ParseFromString(self.mBuf)
return target
self.mStatus = self.mStatus + 1
continue
return None
def sendYtMsg (self, msg):
data = msg.SerializeToString()
self.write(data)
def crcSendUpdate (self, ch, first = False):
if (first):
self.mCrcSendCalc = 0xffff
self.mCrcSendCalc = self.ccittTable[(self.mCrcSendCalc >> 8 ^ ch) & 0xff] ^ ((self.mCrcSendCalc << 8) & 0xffff)
def write (self, data):
buf = bytearray(len(data) + 8)
buf[0] = 0x10
buf[1] = (len(data) >> 16) & 0xff
buf[2] = (len(data) >> 8) & 0xff
buf[3] = (len(data) >> 0) & 0xff
self.crcSendUpdate(buf[1], True)
self.crcSendUpdate(buf[2])
self.crcSendUpdate(buf[3])
buf[4] = (self.mCrcSendCalc >> 8) & 0xff
buf[5] = (self.mCrcSendCalc >> 0) & 0xff
buf[6:] = data
buf.append(0)
buf.append(0)
transLen = 0
# self.printBuf(buf)
for i in range(1, len(buf)):
# print(i, self.toHex(buf[i]))
if (i >= 6 and i < len(buf) - 2):
self.crcSendUpdate(buf[i], i == 6)
if (buf[i] == 0x10 or buf[i] == 0x11):
transLen += 1
buf[len(buf) - 2] = (self.mCrcSendCalc >> 8) & 0xff
buf[len(buf) - 1] = (self.mCrcSendCalc >> 0) & 0xff
for i in range(len(buf)-2, len(buf)):
# print(i, self.toHex(buf[i]))
if (buf[i] == 0x10 or buf[i] == 0x11):
transLen += 1
idx = 0
transedBuffer = bytearray(len(buf) + transLen)
for i in range(0, len(buf)):
if ((buf[i] == 0x10 or buf[i] == 0x11) and i > 0):
transedBuffer[idx] = 0x11
idx += 1
transedBuffer[idx] = buf[i] ^ 0x11
idx += 1
else:
transedBuffer[idx] = buf[i]
idx += 1
# self.printBuf(transedBuffer)
self.port.write(transedBuffer)
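
# Hedged usage sketch (assumes pyserial and a VisionSeed device on /dev/ttyACM0;
# not part of the original module):
#
#   import serial
#   port = serial.Serial('/dev/ttyACM0', 115200, timeout=0.1)
#   link = YtDataLink(port)
#   while True:
#       msg = link.recvRunOnce()
#       if msg is not None:
#           print(msg)   # a decoded YtMsg protobuf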
| 2.328125
| 2
|
facerecog_from_webcam(improved).py
|
tonyg0988/Face-Recognition-with-GUi
| 7
|
12777836
|
import face_recognition
import cv2
import numpy as np
import os
import re
from itertools import chain
known_people_folder='./database'
def scan_known_people(known_people_folder):
known_names = []
known_face_encodings = []
for file in image_files_in_folder(known_people_folder):
basename = os.path.splitext(os.path.basename(file))[0]
img = face_recognition.load_image_file(file)
encodings = face_recognition.face_encodings(img)
print('in face finding function')
        if len(encodings) > 1:
            print("WARNING: More than one face found in {}. Only considering the first face.".format(file))
        if len(encodings) == 0:
            print("WARNING: No faces found in {}. Ignoring file.".format(file))
        else:
            known_names.append(basename)
            known_face_encodings.append(encodings[0])
    return known_names, known_face_encodings
def image_files_in_folder(folder):
print('in image files in folder fucntion')
return [os.path.join(folder, f) for f in os.listdir(folder) if re.match(r'.*\.(jpg|jpeg|png)', f, flags=re.I)]
#improved:-
# 1. Process each video frame at 1/4 resolution (though still display it at full resolution)
# 2. Only detect faces in every other frame of video.
#There you go, i pulled a little sneaky on you.
known_people_folder='./database'
# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)
# Create arrays of known face encodings and their names
known_face_names,known_face_encodings = scan_known_people(known_people_folder)
# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
while True:
# Grab a single frame of video
ret, frame = video_capture.read()
#Converting the imgage to HLS for brightness(Light intersity) Detection
imgHLS = cv2.cvtColor(frame, cv2.COLOR_BGR2HLS)
Lchannel = imgHLS[:,:,1]
a=list(chain.from_iterable(Lchannel))
brightness=sum(a)/len(a)
    # brightness bands; the 95-105 band is assumed to map to "Very Good"
    if brightness <= 75:
        condition = "Very Poor"
    elif brightness <= 85:
        condition = "Poor"
    elif brightness <= 95:
        condition = "Good"
    elif brightness <= 105:
        condition = "Very Good"
    else:
        condition = "Excellent"
print(condition)
#print(brightness)
#np.array(Lchannel).tolist()
#print(type(Lchannel))
# Resize frame of video to 1/4 size for faster face recognition processing
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
# Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
rgb_small_frame = small_frame[:, :, ::-1]
# Only process every other frame of video to save time
if process_this_frame:
# Find all the faces and face encodings in the current frame of video
face_locations = face_recognition.face_locations(rgb_small_frame)
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
face_names = []
for face_encoding in face_encodings:
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
name = "Unknown"
# # If a match was found in known_face_encodings, just use the first one.
# if True in matches:
# first_match_index = matches.index(True)
# name = known_face_names[first_match_index]
# Or instead, use the known face with the smallest distance to the new face
face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = known_face_names[best_match_index]
face_names.append(name)
process_this_frame = not process_this_frame
# Display the results
for (top, right, bottom, left), name in zip(face_locations, face_names):
# Scale back up face locations since the frame we detected in was scaled to 1/4 size
top *= 4
right *= 4
bottom *= 4
left *= 4
# Draw a box around the face
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
# Draw a label with a name below the face
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
cv2.putText(frame,'Brightness/Visiblity: '+condition,(80,30), font,1,(255,255,255),1,cv2.LINE_AA)
cv2.putText(frame,'Press Q to Quit',(5,470), font,0.5,(255,255,255),1,cv2.LINE_AA)
# Display the resulting image
cv2.imshow('Video', frame)
# Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
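# Note (a sketch, not part of the original flow): the brightness estimate above
# flattens the L channel into a Python list before averaging, which is slow for
# full-resolution frames. An equivalent and much faster form would let NumPy do
# the averaging directly, e.g.:
#
#   imgHLS = cv2.cvtColor(frame, cv2.COLOR_BGR2HLS)
#   brightness = float(imgHLS[:, :, 1].mean())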
| 2.90625
| 3
|
python/Editioning.py
|
alanfeng99/oracle-db-examples
| 1
|
12777837
|
<filename>python/Editioning.py
#------------------------------------------------------------------------------
# Copyright 2016, 2017, Oracle and/or its affiliates. All rights reserved.
#
# Portions Copyright 2007-2015, <NAME>. All rights reserved.
#
# Portions Copyright 2001-2007, Computronix (Canada) Ltd., Edmonton, Alberta,
# Canada. All rights reserved.
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Editioning.py
# This script demonstrates the use of editioning, available in Oracle
# Database 11.2 and higher. See the Oracle documentation on the subject for
# additional information. Adjust the constants at the top of the script for
# your own database as needed.
#
# This script requires cx_Oracle 5.3 and higher.
#------------------------------------------------------------------------------
from __future__ import print_function
import os
import cx_Oracle
# define constants used throughout the script; adjust as desired
USER_NAME = "CX_ORACLE_TESTEDITIONS"
PASSWORD = "<PASSWORD>"
DBA_USER_NAME = "system"
DBA_PASSWORD = ""
DSN = ""
EDITION_NAME = "CX_ORACLE_E1"
# create user dropping it first, if necessary
connection = cx_Oracle.Connection(DBA_USER_NAME, DBA_PASSWORD, DSN)
cursor = connection.cursor()
cursor.execute("""
select username
from dba_users
where username = :name""",
name = USER_NAME)
names = [n for n, in cursor]
for name in names:
print("Dropping user", name)
cursor.execute("drop user %s cascade" % name)
print("Creating user", USER_NAME)
cursor.execute("create user %s identified by %s" % (USER_NAME, PASSWORD))
cursor.execute("grant create session, create procedure to %s" % USER_NAME)
cursor.execute("alter user %s enable editions" % USER_NAME)
# create edition, dropping it first, if necessary
cursor.execute("""
select edition_name
from dba_editions
where edition_name = :name""",
name = EDITION_NAME)
names = [n for n, in cursor]
for name in names:
print("Dropping edition", name)
cursor.execute("drop edition %s" % name)
print("Creating edition", EDITION_NAME)
cursor.execute("create edition %s" % EDITION_NAME)
cursor.execute("grant use on edition %s to %s" % (EDITION_NAME, USER_NAME))
# now connect to the newly created user and create a procedure
connection = cx_Oracle.Connection(USER_NAME, PASSWORD, DSN)
print("Edition should be None at this point, actual value is",
connection.edition)
cursor = connection.cursor()
cursor.execute("""
create or replace function TestEditions return varchar2 as
begin
return 'Base Edition';
end;""")
result = cursor.callfunc("TestEditions", str)
print("Function call should return Base Edition, actually returns", result)
# next, change the edition and recreate the procedure in the new edition
cursor.execute("alter session set edition = %s" % EDITION_NAME)
print("Edition should be %s at this point, actual value is" % EDITION_NAME,
connection.edition)
cursor.execute("""
create or replace function TestEditions return varchar2 as
begin
return 'Edition 1';
end;""")
result = cursor.callfunc("TestEditions", str)
print("Function call should return Edition 1, actually returns", result)
# next, change the edition back to the base edition and demonstrate that the
# original function is being called
cursor.execute("alter session set edition = ORA$BASE")
result = cursor.callfunc("TestEditions", str)
print("Function call should return Base Edition, actually returns", result)
# the edition can be set upon connection
connection = cx_Oracle.Connection(USER_NAME, PASSWORD, DSN,
edition = EDITION_NAME)
cursor = connection.cursor()
result = cursor.callfunc("TestEditions", str)
print("Function call should return Edition 1, actually returns", result)
# it can also be set via the environment variable ORA_EDITION
os.environ["ORA_EDITION"] = EDITION_NAME
connection = cx_Oracle.Connection(USER_NAME, PASSWORD, DSN)
print("Edition should be %s at this point, actual value is" % EDITION_NAME,
connection.edition)
cursor = connection.cursor()
result = cursor.callfunc("TestEditions", str)
print("Function call should return Edition 1, actually returns", result)
| 2.28125
| 2
|
citrination_client/views/tests/test_data_view_builder.py
|
CitrineInformatics/python-citrination-client
| 20
|
12777838
|
<reponame>CitrineInformatics/python-citrination-client
import pytest
from citrination_client.views.data_view_builder import DataViewBuilder
from citrination_client.views.descriptors.real_descriptor import RealDescriptor
from citrination_client.client import CitrinationClient
from citrination_client.views.descriptors import FormulationDescriptor
from os import environ
def test_add_descriptor():
builder = DataViewBuilder()
bandgap = RealDescriptor("band gap", lower_bound=-1, upper_bound=1)
builder.add_descriptor(bandgap, "output", True)
config = builder.build()
assert config["group_by"] == ["band gap"]
assert config["descriptors"] == [bandgap.as_dict()]
assert config["roles"] == {"band gap": "output"}
# Make sure duplicates raise an error
with pytest.raises(ValueError) as err:
builder.add_descriptor(RealDescriptor("band gap", lower_bound=-1, upper_bound=1), "input")
@pytest.mark.skipif(environ['CITRINATION_SITE'] != "https://citrination.com",
reason="Formulation test only supported on open citrination")
def test_formulation_descriptor():
client = CitrinationClient(environ["CITRINATION_API_KEY"])
builder = DataViewBuilder()
builder.dataset_ids([187195])
builder.add_formulation_descriptor(FormulationDescriptor("Formulation (idealMassPercent)"), client.data_views)
config = builder.build()
assert config["roles"]["Formulation (idealMassPercent)"] == "input"
assert config["roles"]["component type"] == "ignore"
assert config["roles"]["name"] == "ignore"
assert config["roles"]["% Y2O3 (volume, ideal)"] == "ignore"
y203_share_present = False
name_present = False
component_type_present = False
for desc in config["descriptors"]:
if desc["descriptor_key"] == "% Y2O3 (volume, ideal)":
y203_share_present = True
if desc["descriptor_key"] == "name":
name_present = True
if desc["descriptor_key"] == "component type":
component_type_present = True
assert y203_share_present
assert name_present
assert component_type_present
| 2.171875
| 2
|
django_crypto_trading_bot/trading_bot/models.py
|
chiragmatkar/django-crypto-trading-bot
| 37
|
12777839
|
<gh_stars>10-100
from __future__ import annotations
import logging
from collections import OrderedDict
from datetime import datetime
from decimal import ROUND_DOWN, Decimal, getcontext
from functools import partial
from multiprocessing.pool import ThreadPool
from operator import getitem
from time import sleep
from typing import List, Optional
import pytz
from ccxt.base.errors import RequestTimeout
from ccxt.base.exchange import Exchange
from django.core.cache import cache
from django.db import models
from django_crypto_trading_bot.users.models import User
from .api.client import get_client
from .exceptions import (
PriceToHigh,
PriceToLow,
NoQuoteCurrency,
NoMarket,
)
# Get an instance of a logger
logger = logging.getLogger(__name__)
class Timeframes(models.TextChoices):
MINUTE_1 = "1m"
MINUTE_3 = "3m"
MINUTE_5 = "5m"
MINUTE_15 = "15m"
MINUTE_30 = "30m"
HOUR_1 = "1h"
HOUR_2 = "2h"
HOUR_4 = "4h"
HOUR_6 = "6h"
HOUR_8 = "8h"
HOUR_12 = "12h"
DAY_1 = "1d"
DAY_3 = "3d"
WEEK_1 = "1w"
MONTH_1 = "1M"
class Exchanges(models.TextChoices):
BINANCE = "binance"
class Account(models.Model):
"""
API Account
for an exchange like binance
"""
exchange = models.CharField(max_length=250, choices=Exchanges.choices)
user = models.ForeignKey(User, on_delete=models.CASCADE)
api_key = models.CharField(max_length=250)
secret = models.CharField(max_length=250)
password = models.CharField(max_length=250, blank=True, null=True)
default_fee_rate = models.DecimalField(
max_digits=30, decimal_places=4, default=Decimal(0.1)
)
def get_account_client(self) -> Exchange:
return get_client(
exchange_id=self.exchange, api_key=self.api_key, secret=self.secret
)
def __str__(self):
return "{0}: {1} - {2}".format(self.pk, self.exchange, self.user.get_username())
class Currency(models.Model):
"""
Cryptocurrency
"""
name = models.CharField(max_length=50, blank=True, null=True)
short = models.CharField(max_length=50, unique=True)
def __str__(self):
return "{0}: {1}".format(self.pk, self.short)
class Market(models.Model):
"""
Market model based on https://github.com/ccxt/ccxt/wiki/Manual#market-structure
"""
base = models.ForeignKey(Currency, on_delete=models.PROTECT, related_name="base")
quote = models.ForeignKey(Currency, on_delete=models.PROTECT, related_name="quote")
active = models.BooleanField(default=True)
exchange = models.CharField(max_length=250, choices=Exchanges.choices)
precision_amount = models.IntegerField()
precision_price = models.IntegerField()
limits_amount_min = models.DecimalField(max_digits=30, decimal_places=8)
limits_amount_max = models.DecimalField(max_digits=30, decimal_places=8)
limits_price_min = models.DecimalField(max_digits=30, decimal_places=8)
limits_price_max = models.DecimalField(max_digits=30, decimal_places=8)
@property
def symbol(self):
return "{}/{}".format(self.base.short.upper(), self.quote.short.upper())
@property
def market_id(self):
return "{}{}".format(self.base.short.lower(), self.quote.short.lower())
@property
def baseId(self):
return self.base.short.lower()
@property
def quoteId(self):
return self.quote.short.lower()
def get_min_max_price(self, price: Decimal) -> Decimal:
"""
set the buy & sell min & max prices
"""
if price < self.limits_price_min:
price = self.limits_price_min
if price > self.limits_price_max:
price = self.limits_price_max
return price
def get_min_max_order_amount(self, amount: Decimal) -> Decimal:
"""
set the buy & sell min & max amount
"""
if amount < self.limits_amount_min:
amount = Decimal(0)
if amount > self.limits_amount_max:
amount = self.limits_amount_max
getcontext().prec = 20
return amount.quantize(Decimal(".1") ** self.precision_amount)
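# Illustration (hedged, made-up numbers): with precision_amount = 2 the quantize
# step above rounds Decimal("12.34567") to Decimal("12.35") under the default
# rounding mode, or to Decimal("12.34") once ROUND_DOWN is in effect (as set in
# Order.get_retrade_amount below).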
def __str__(self) -> str:
return self.symbol
class OrderErrorLog(models.Model):
class ErrorTypes(models.TextChoices):
Insufficient_Funds = "Insufficient Funds"
InvalidOrder = "Invalid Order"
order = models.ForeignKey(
"trading_bot.Order", related_name="error_log", on_delete=models.CASCADE
)
created = models.DateTimeField(auto_now_add=True)
error_type = models.CharField(max_length=50, choices=ErrorTypes.choices)
error_message = models.TextField(blank=True, null=True)
def __str__(self):
return "{0}: {1}".format(self.created, self.error_type)
class Order(models.Model):
"""
Order based on https://github.com/ccxt/ccxt/wiki/Manual#order-structure
"""
# STATUS_CHOICE
class Status(models.TextChoices):
OPEN = "open"
CLOSED = "closed"
CANCELED = "canceled"
EXPIRED = "expired"
REJECTED = "rejected"
NOT_MIN_NOTIONAL = "not min notional"
# ORDER_TYPE_CHOICE
class OrderType(models.TextChoices):
MARKET = "market"
LIMIT = "limit"
# SIDE_CHOICE
class Side(models.TextChoices):
SIDE_BUY = "buy"
SIDE_SELL = "sell"
bot = models.ForeignKey("trading_bot.Bot", on_delete=models.CASCADE)
order_id = models.CharField(max_length=255, unique=True)
timestamp = models.DateTimeField()
status = models.CharField(
max_length=20, choices=Status.choices, default=Status.OPEN
)
order_type = models.CharField(max_length=8, choices=OrderType.choices)
side = models.CharField(max_length=4, choices=Side.choices)
price = models.DecimalField(max_digits=30, decimal_places=8) # quote currency
amount = models.DecimalField(
max_digits=30, decimal_places=8
) # ordered amount of base currency
filled = models.DecimalField(
max_digits=30, decimal_places=8, default=0
) # filled amount of base currency
next_order = models.ForeignKey(
"self", on_delete=models.CASCADE, blank=True, null=True
)
fee_currency = models.ForeignKey(
Currency, on_delete=models.PROTECT, blank=True, null=True
)
fee_cost = models.DecimalField(
max_digits=30, decimal_places=8, blank=True, null=True
)
fee_rate = models.DecimalField(
max_digits=30, decimal_places=8, blank=True, null=True
)
# rising chart
last_price_tick = models.DecimalField(
max_digits=30, decimal_places=8, blank=True, null=True
)
market = models.ForeignKey(
Market, on_delete=models.PROTECT, blank=True, null=True
) # Cryptomarket like TRX/BNB
def remaining(self) -> Decimal:
"""
remaining amount to fill
"""
return self.amount - self.filled
def cost(self) -> Decimal:
"""
'filled' * 'price' (filling price used where available)
"""
return self.filled * self.price
def base_amount(self) -> Decimal:
"""
Get base amount minus cost
Returns:
Decimal -- [description]
"""
if self.bot.market:
if self.fee_cost and self.bot.market.base == self.fee_currency:
return self.filled - self.fee_cost
return self.filled
def quote_amount(self) -> Decimal:
"""
Get quote amount minus cost
Returns:
Decimal -- [description]
"""
# todo logic error?
if self.bot.market:
if (
self.fee_cost
and self.bot.market
and self.bot.market.quote == self.fee_currency
):
return self.cost() - self.fee_cost
return self.cost()
def get_retrade_amount(self, price: Decimal) -> Decimal:
"""
get retrade amount
Returns:
Decimal -- retrade amount
"""
# todo add test case
market: Market
if self.bot.market:
market = self.bot.market
elif self.market:
market = self.market
else:
raise NoMarket("No market in bot or order available!")
if price < market.limits_price_min:
raise PriceToLow()
if price > market.limits_price_max:
raise PriceToHigh()
getcontext().rounding = ROUND_DOWN
amount: Decimal = Decimal(self.amount)
fee_rate: Decimal
if self.fee_rate:
fee_rate = self.fee_rate
else:
fee_rate = self.bot.account.default_fee_rate
fee_cost: Decimal = amount / Decimal(100) * fee_rate
amount -= fee_cost
if self.side == Order.Side.SIDE_SELL:
quote_amount: Decimal = amount * self.price
amount = quote_amount / price
amount -= amount % market.limits_amount_min
return market.get_min_max_order_amount(amount=amount)
@property
def errors(self) -> int:
return OrderErrorLog.objects.filter(order=self).count()
def __str__(self):
return "{0}: {1}".format(self.pk, self.order_id)
class Bot(models.Model):
"""
Trading Bot
"""
class TradeMode(models.TextChoices):
WAVE_RIDER = "wave rider" # switch between buy & sell orders
RISING_CHART = "rising chart" # buy coin with rising price
# todo add validator for each trade_mode
account = models.ForeignKey(Account, on_delete=models.CASCADE) # API Account
trade_mode = models.CharField(
max_length=20, choices=TradeMode.choices, default=TradeMode.WAVE_RIDER
)
created = models.DateTimeField(auto_now_add=True)
active = models.BooleanField(default=True)
# wave rider
market = models.ForeignKey(
Market, on_delete=models.PROTECT, blank=True, null=True
) # Cryptomarket like TRX/BNB
timeframe = models.CharField(
max_length=10,
choices=Timeframes.choices,
default=Timeframes.MONTH_1,
blank=True,
null=True,
)
# rising chart
quote = models.ForeignKey(Currency, on_delete=models.PROTECT, blank=True, null=True)
max_amount = models.DecimalField(
max_digits=30, decimal_places=8, blank=True, null=True
)
min_rise = models.DecimalField(
max_digits=30, decimal_places=2, blank=True, null=True
)
stop_loss = models.DecimalField(
max_digits=30, decimal_places=2, blank=True, null=True
)
lock_time = models.IntegerField(default=12)
@property
def start_amount(self) -> Optional[Decimal]:
orders: models.Manager[Order] = Order.objects.filter(bot=self).order_by(
"timestamp"
)[:1]
if orders:
return orders[0].amount
return None
@property
def current_amount(self) -> Optional[Decimal]:
orders: models.Manager[Order] = Order.objects.filter(
bot=self, status=Order.Status.CLOSED
).order_by("-timestamp")[:1]
if orders:
return orders[0].amount
return None
@property
def estimate_current_amount(self) -> Optional[Decimal]:
orders: models.Manager[Order] = Order.objects.filter(bot=self).order_by(
"-timestamp"
)[:1]
if orders:
return orders[0].amount
return None
@property
def roi(self) -> Optional[Decimal]:
start_amount: Optional[Decimal] = self.start_amount
if not start_amount:
return None
current_amount: Optional[Decimal] = self.current_amount
if not current_amount:
return None
getcontext().prec = 2
win: Decimal = current_amount - start_amount
return win / current_amount * Decimal(100)
@property
def estimate_roi(self) -> Optional[Decimal]:
start_amount: Optional[Decimal] = self.start_amount
if not start_amount:
return None
current_amount: Optional[Decimal] = self.estimate_current_amount
if not current_amount:
return None
getcontext().prec = 2
win: Decimal = current_amount - start_amount
return win / current_amount * Decimal(100)
@property
def orders_count(self) -> int:
return Order.objects.filter(bot=self).count()
def fetch_balance(self, test: bool = False) -> Decimal:
# todo add test case
if not self.quote:
raise NoQuoteCurrency("Bot has not quote currency!")
if test:
return Decimal(1)
return Decimal(
self.account.get_account_client().fetch_balance()["free"][
self.quote.short.upper()
]
)
def fetch_tickers(self) -> OrderedDict:
exchange: Exchange = get_client(exchange_id=self.account.exchange)
tickers: dict = cache.get_or_set(
"tickers-{}".format(self.account.exchange), exchange.fetch_tickers(), 50
)
item: dict
return OrderedDict(
sorted(
tickers.items(),
reverse=True,
key=lambda item: getitem(item[1], "percentage"),
)
)
def __str__(self):
return "{0}: {1} - {2}".format(
self.pk, self.account.user.get_username(), self.market
)
class Saving(models.Model):
order = models.ForeignKey(Order, on_delete=models.CASCADE)
bot = models.ForeignKey(Bot, on_delete=models.CASCADE)
amount = models.DecimalField(
max_digits=30, decimal_places=8
) # ordered amount of base currency
currency = models.ForeignKey(Currency, on_delete=models.PROTECT)
class Trade(models.Model):
"""
Trade based on https://github.com/ccxt/ccxt/wiki/Manual#trade-structure
"""
order = models.ForeignKey(
Order, related_name="trade_order", on_delete=models.CASCADE
)
trade_id = models.CharField(max_length=255, unique=True)
timestamp = models.DateTimeField()
taker_or_maker = models.CharField(max_length=8, choices=Order.OrderType.choices)
amount = models.DecimalField(max_digits=30, decimal_places=8)
fee_currency = models.ForeignKey(Currency, on_delete=models.PROTECT)
fee_cost = models.DecimalField(max_digits=30, decimal_places=8)
fee_rate = models.DecimalField(
max_digits=30, decimal_places=8, blank=True, null=True
)
def cost(self):
return self.amount * self.order.price
class OHLCV(models.Model):
"""
OHLCV candles https://github.com/ccxt/ccxt/wiki/Manual#ohlcv-structure
"""
market = models.ForeignKey(Market, on_delete=models.PROTECT)
timeframe = models.CharField(max_length=10, choices=Timeframes.choices)
timestamp = models.DateTimeField()
open_price = models.DecimalField(max_digits=30, decimal_places=8)
highest_price = models.DecimalField(max_digits=30, decimal_places=8)
lowest_price = models.DecimalField(max_digits=30, decimal_places=8)
closing_price = models.DecimalField(max_digits=30, decimal_places=8)
volume = models.DecimalField(max_digits=30, decimal_places=8)
@staticmethod
def get_OHLCV(candle: List[float], timeframe: str, market: Market) -> OHLCV:
"""Get a OHLCV candle from a OHLCV request
Arguments:
candle {List[float]} -- candle list
timeframe {Timeframes} -- timeframe from candle
market {Market} -- market from candle
Returns:
OHLCV -- unsaved OHLCV candle
"""
return OHLCV(
market=market,
timeframe=timeframe,
timestamp=datetime.fromtimestamp(candle[0] / 1000, tz=pytz.timezone("UTC")),
open_price=Decimal(candle[1]),
highest_price=Decimal(candle[2]),
lowest_price=Decimal(candle[3]),
closing_price=Decimal(candle[4]),
volume=Decimal(candle[5]),
)
@staticmethod
def create_OHLCV(
candle: List[float], timeframe: Timeframes, market: Market
) -> OHLCV:
"""Get a saved OHLCV candle from a OHLCV request
Arguments:
candle {List[float]} -- candle list
timeframe {Timeframes} -- timeframe from candle
market {Market} -- market from candle
Returns:
OHLCV -- saved OHLCV candle
"""
ohlcv: OHLCV = OHLCV.get_OHLCV(
candle=candle, timeframe=timeframe, market=market
)
ohlcv.save()
return ohlcv
@staticmethod
def last_candle(timeframe: Timeframes, market: Market) -> Optional[OHLCV]:
"""Get last candle by timestamp of market & timeframe
Arguments:
timeframe {Timeframes} -- timeframe from candle
market {Market} -- market from candle
Returns:
Optional[OHLCV] -- last candle by timestamp of market & timeframe
"""
return (
OHLCV.objects.filter(timeframe=timeframe, market=market)
.order_by("timestamp")
.last()
)
@staticmethod
def update_new_candles(market: Market, timeframe: Timeframes):
"""Update all candles for a single market of a timeframe
Arguments:
market {Market} -- market from candle
timeframe {Timeframes} -- timeframe from candle
"""
exchange: Exchange = get_client(exchange_id=market.exchange)
last_candle: Optional[OHLCV] = OHLCV.last_candle(
timeframe=timeframe, market=market
)
last_candle_time: int = 0
if last_candle:
last_candle_time = int(last_candle.timestamp.timestamp()) * 1000
ohlcvs: List[OHLCV] = list()
while True:
try:
candles: List[List[float]] = exchange.fetch_ohlcv(
symbol=market.symbol,
timeframe=timeframe,
since=last_candle_time + 1,
)
except RequestTimeout:
logger.warning(
"Connetion error from {} ... wait 120s for next try".format(
market.exchange
)
)
sleep(120)
continue
for candle in candles:
ohlcvs.append(
OHLCV.get_OHLCV(candle=candle, timeframe=timeframe, market=market)
)
if len(ohlcvs) >= 10000:
OHLCV.objects.bulk_create(ohlcvs)
ohlcvs.clear()
# no new candles
if len(candles) == 0:
break
last_candle_time = int(candles[-1][0])
OHLCV.objects.bulk_create(ohlcvs)
ohlcvs.clear()
logger.info(
"Update market {} for timeframe {}.".format(market.symbol, timeframe)
)
@staticmethod
def update_new_candles_all_markets(timeframe: Timeframes):
"""Update all candles for all markets of a timeframe
Arguments:
timeframe {Timeframes} -- timeframe from candle
"""
markets: List[Market] = list()
for market in Market.objects.filter(active=True):
markets.append(market)
# Make the Pool of workers
pool = ThreadPool(8)
pool.map(partial(OHLCV.update_new_candles, timeframe=timeframe), markets)
# Close the pool and wait for the work to finish
pool.close()
pool.join()
| 2.09375
| 2
|
lib/utils/jobs.py
|
rcarmo/yaki-tng
| 2
|
12777840
|
<reponame>rcarmo/yaki-tng<filename>lib/utils/jobs.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2012, <NAME>
Description: In-process job management
License: MIT (see LICENSE.md for details)
"""
import os, sys, logging, time, traceback, multiprocessing, gc
from cPickle import loads, dumps
from Queue import PriorityQueue, Empty
from threading import Thread, Semaphore
from uuid import uuid4
from functools import partial
from collections import defaultdict
log = logging.getLogger(__name__)
default_priority = 0
max_workers = multiprocessing.cpu_count() * 2
class Pool:
"""Represents a thread pool"""
def __init__(self, workers = max_workers, rate_limit = 1000):
self.max_workers = workers
self.mutex = Semaphore()
self.results = {}
self.retries = defaultdict(int)
self.queue = PriorityQueue()
self.threads = []
self.rate_limit = rate_limit
def _tick(self):
time.sleep(1.0/self.rate_limit)
# clean up finished threads
self.threads = [t for t in self.threads if t.isAlive()]
return (not self.queue.empty()) or (len(self.threads) > 0)
def _loop(self):
"""Handle task submissions"""
def run_task(priority, f, uuid, retries, args, kwargs):
"""Run a single task"""
try:
t.name = getattr(f, '__name__', None)
result = f(*args, **kwargs)
except Exception as e:
# Retry the task if applicable
if log:
log.error(traceback.format_exc())
if retries > 0:
with self.mutex:
self.retries[uuid] += 1
# re-queue the task with a lower (i.e., higher-valued) priority
self.queue.put((priority+1, dumps((f, uuid, retries - 1, args, kwargs))))
self.queue.task_done()
return
result = e
with self.mutex:
self.results[uuid] = dumps(result)
self.retries[uuid] += 1
self.queue.task_done()
while self._tick():
# spawn more threads to fill free slots
log.warn("Running %d/%d threads" % (len(self.threads),self.max_workers))
if len(self.threads) < self.max_workers:
log.debug("Queue Length: %d" % self.queue.qsize())
try:
priority, data = self.queue.get(True, 1.0/self.rate_limit)
except Empty:
continue
f, uuid, retries, args, kwargs = loads(data)
t = Thread(target=run_task, args=[priority, f, uuid, retries, args, kwargs])
t.setDaemon(True)
self.threads.append(t)
t.start()
log.debug("Exited loop.")
for t in self.threads:
t.join()
def stop(self):
"""Flush the job queue"""
self.queue = PriorityQueue()
def start(self, daemonize=False):
"""Pool entry point"""
self.results = {}
self.retries = defaultdict(int)
if daemonize:
t = Thread(target=self._loop)
t.setDaemon(True)
t.start()
return
else:
self._loop()
default_pool = Pool()
class Deferred(object):
"""Allows lookup of task results and status"""
def __init__(self, pool, uuid):
self.uuid = uuid
self.pool = pool
self._result = None
@property
def result(self):
if self._result is None:
with self.pool.mutex:
if self.uuid in self.pool.results.keys():
self._result = loads(self.pool.results[self.uuid])
return self._result
@property
def retries(self):
return self.pool.retries[self.uuid]
def task(func=None, pool=None, max_retries=0, priority=default_priority):
"""Task decorator - setus up a .delay() attribute in the task function"""
if func is None:
return partial(task, pool=pool, max_retries=max_retries)
if pool is None:
pool = default_pool
def delay(*args, **kwargs):
uuid = str(uuid4()) # one for each task
pool.queue.put((priority,dumps((func, uuid, max_retries, args, kwargs))))
return Deferred(pool, uuid)
func.delay = delay
func.pool = pool
return func
def start(daemonize = False):
default_pool.start(daemonize = daemonize)
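# A minimal usage sketch (the task below is hypothetical and not part of the
# module): queue a task via the decorator's .delay(), run the default pool in a
# background thread, and poll the Deferred until the result arrives.
if __name__ == '__main__':
    logging.basicConfig(level=logging.ERROR)  # keep per-tick pool logging quiet

    @task(max_retries=1)
    def add(a, b):
        return a + b

    deferred = add.delay(2, 3)   # queued before the pool starts draining
    start(daemonize=True)
    while deferred.result is None:
        time.sleep(0.05)
    print(deferred.result)       # 5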
| 2.171875
| 2
|
SPRINT4/src/servicios/main.py
|
PTIN2020/B3
| 0
|
12777841
|
from socketclient import Client
import socketio
import csv
from random import randrange
import threading
import time
def toni(car_id):
sio = None
try:
sio = socketio.Client(ssl_verify=False)
sio.connect('http://localhost:3003/')
except socketio.exceptions.ConnectionError:
print('[ERROR] Server is off')
@sio.event
def notification(data):
print('[INFO] Notification: ', data)
@sio.event
def message(data):
print('[INFO] Message: ', data)
@sio.event
def bad_status(data):
print('[ERROR] Bad status:', data)
@sio.event
def bad_notify(data):
print('[ERROR] Bad notify:', data)
@sio.event
def connect():
socket.isConnected = True
socket.send('id', socket.id)
print("[INFO] Connected!")
@sio.event
def connect_error():
print("[ERROR] The connection failed!")
@sio.event
def disconnect():
socket.isConnected = False
print("[INFO] Disconnected!")
socket = Client(sio, car_id)
pos = randrange(469)
with open('cami.csv', 'r') as file:
reader = list(csv.reader(file))
for i in range(pos, len(reader), 2):
row = reader[i]
socket.update_position(int(row[0]), int(row[1][1:]))
time.sleep(3)
for i in range(0, len(reader), 2):
row = reader[i]
socket.update_position(int(row[0]), int(row[1][1:]))
time.sleep(3)
socket.disconnect()
print('End car ', car_id)
threads = list()
for index in range(1, 6):
car = threading.Thread(target=toni, args=("CH000" + str(index),), daemon=True)
threads.append(car)
car.start()
for thread in threads:
thread.join()
print("Main End")
| 2.859375
| 3
|
dscoe_utils/dscoe_utils.py
|
Peter-32/dscoe_utils
| 0
|
12777842
|
# Databricks notebook source
# update_dscoe_utils() {
# current_path=`pwd`
# current_folder=${current_path##*/}
# # pip install --upgrade setuptools
# # pip install --upgrade twine
# mkdir /Users/petermyers/Desktop/dscoe_utils/
# py_commons_env
# rm /Users/petermyers/Desktop/dscoe_utils/dscoe_utils.py
# databricks workspace export_dir /Shared/dscoe_utils /Users/petermyers/Desktop/dscoe_utils/dscoe_utils
# touch /Users/petermyers/Desktop/dscoe_utils/__init__.py
# cd /Users/petermyers/Desktop/dscoe_utils/
# git_push update
# rm -rf dist
# echo ""
# echo ""
# echo "Go to GitHub and make a new release: https://github.com/Peter-32/dscoe_utils/releases"
# echo "Just fill out the tag as something like v_02"
# echo "copy the tar.gz as a link and place it in the setup script"
# echo "update the setup.py to use the new version number"
# echo "Then run build_py"
# echo "Lastly run deploy_py"
# echo "username and password are: peterm3232"
# echo "<PASSWORD>"
# }
def initialize_databases(spark):
spark.sql("create database back_room")
spark.sql("create database front_room")
def view_back_room(spark):
spark.sql("use back_room")
display(spark.sql("show tables"))
def view_front_room(spark):
spark.sql("use front_room")
display(spark.sql("show tables"))
def run_a_notebook(dbutils, path="/Shared/utils/test", arguments={}):
dbutils.notebook.run(path, timeout_seconds=86400, arguments=arguments)
# COMMAND ----------
| 2.515625
| 3
|
example/use_cases/SpeechToTextOfDirectoryOfAudioFiles.py
|
symblai/symbl-python-sdk
| 4
|
12777843
|
# SpeechToText of multiple audio files in a directory
# Using this code snippet you can convert multiple audio files from a specific directory to text
# It will create a separate .txt file for each audio file in the given directory and save that audio file's transcription into it
import symbl
from os import listdir
from os.path import isfile, join
# returns lambda function with fileName which is under processing
def save_transcriptions_in_file(fileName):
return lambda conversation_object: on_success(conversation_object, fileName)
# returns actual callback to save the transcriptions of a conversation in a file
def on_success(conversation_object, fileName):
transcriptions = conversation_object.get_messages()
file = open(fileName + ".txt","w+")
file.write(str(transcriptions))
file.close()
# An ‘r’ preceding a string denotes a raw, (almost) un-escaped string.
# The escape character is backslash, that is why a normal string will not work as a Windows path string.
# If you are using Mac or Linux, no need to append `r` before <directory path>
directory_path = r'<directory_path>'
files = [join(directory_path, file) for file in listdir(directory_path) if isfile(join(directory_path, file))]
# Process audio files in the above mentioned directory
for file in files:
job = symbl.Audio.process_file(
# credentials={app_id: <app_id>, app_secret: <app_secret>}, #Optional, Don't add this parameter if you have symbl.conf file in your home directory
file_path=file, wait=False).on_complete(save_transcriptions_in_file(file))
| 3.515625
| 4
|
aiograph/utils/exceptions.py
|
fakegit/aiograph
| 45
|
12777844
|
# TODO: Find more error types
class TelegraphError(Exception):
__subclasses = []
match = None
text = None
@classmethod
def get_text(cls):
if cls.text is None and cls.match is not None:
return cls.match.replace('_', ' ').capitalize() + '!'
return cls.text
def __init_subclass__(cls, match=None, **kwargs):
super(TelegraphError, cls).__init_subclass__(**kwargs)
if match is not None:
cls.match = match.upper()
cls.__subclasses.append(cls)
@classmethod
def detect(cls, description):
"""
Automatically detect the error type from its description.
:param description:
:raise: TelegraphError
"""
match = description.upper()
for err in cls.__subclasses:
if err is cls:
continue
if err.match and err.match in match:
raise err(err.get_text() or description)
raise cls(description)
class NoFilesPassed(TelegraphError):
def __init__(self):
super(NoFilesPassed, self).__init__('No files have been uploaded.')
class AccessTokenInvalid(TelegraphError, match='ACCESS_TOKEN_INVALID'):
pass
class FieldsFormatInvalid(TelegraphError, match='FIELDS_FORMAT_INVALID'):
pass
class UnknownMethod(TelegraphError, match='UNKNOWN_METHOD'):
pass
class ContentRequired(TelegraphError, match='CONTENT_REQUIRED'):
pass
class ContentTextRequired(TelegraphError, match='CONTENT_TEXT_REQUIRED'):
pass
class PageNotFound(TelegraphError, match='PAGE_NOT_FOUND'):
pass
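# A minimal usage sketch: detect() maps an API error description onto the
# matching subclass above (or falls back to a plain TelegraphError).
if __name__ == '__main__':
    try:
        TelegraphError.detect('PAGE_NOT_FOUND')
    except PageNotFound as exc:
        print(exc)  # -> "Page not found!"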
| 2.484375
| 2
|
saveHarmonicFunction.py
|
Peeks1/AffineCarpetProject
| 0
|
12777845
|
<filename>saveHarmonicFunction.py
import numpy as np
import graphClass as gc
import copy
import os
import os.path as p
import time
# INPUT HERE
# what level affine carpet would you like:
precarpet_level = 5
# how large would you like the small squares to be:
sideOfSmallSquares = .1
# how many runs
numRuns = 100
# would you like a cross or X-graph (input "+" or "x"):
kindOfGraph = "x"
# how stretched would you like the carpet to be (this will be how far the 0 boundary will be from the 1 boundary)
stretchFactor = 1/4
# other important variable calculated from above variables
sideOfCenterHole = 1 - sideOfSmallSquares * 2
# file naming variables
kogString = ''
typeOfCarpet = str(sideOfSmallSquares.__round__(3)) + "affineCarpet1x" + str(stretchFactor.__round__(3))
level = 'level' + str(precarpet_level)
if kindOfGraph == '+':
kogString = 'crossGraphData'
elif kindOfGraph == 'x':
kogString = 'xGraphData'
else:
exit()
# get file ready for writing data
if precarpet_level < 1:
exit()
filePath = kogString + "/" + typeOfCarpet + "/" + level + '.txt'
if p.isfile(filePath):
print('You already have data for this carpet. Press y to confirm that you would like to overwrite this data.\n')
keypress = input()
if not keypress == 'y':
exit()
if not p.isdir(kogString + '/' + typeOfCarpet):
os.makedirs(kogString + '/' + typeOfCarpet)
file = open(filePath, "w+")
starttime = time.time()
# 2 cases: level 1 carpet or higher than level 1 carpet
# if level 1 carpet, use the old system of building the carpet and then applying relaxations
# if not, use data from the previous carpet (the program exits if that data doesn't exist) to build a graph and apply
# starting values
if precarpet_level == 1:
# build the level 0 carpet
aC0 = gc.Graph()
if kindOfGraph == "+":
aC0.add_vertex("a", np.array([0, 0.5]))
aC0.add_vertex("b", np.array([0.5, 1]))
aC0.add_vertex("c", np.array([1, 0.5]))
aC0.add_vertex("d", np.array([0.5, 0]))
aC0.add_vertex("e", np.array([0.5, 0.5]))
elif kindOfGraph == "x":
aC0.add_vertex("a", np.array([0, 0]))
aC0.add_vertex("b", np.array([0, 1]))
aC0.add_vertex("c", np.array([1, 1]))
aC0.add_vertex("d", np.array([1, 0]))
aC0.add_vertex("e", np.array([0.5, 0.5]))
else:
print("You need to input '+' or 'x' for kindOfGraph")
exit()
aC0.add_edge("a", "e")
aC0.add_edge('b', 'e')
aC0.add_edge('c', 'e')
aC0.add_edge('d', 'e')
# variables needed for the for loop that builds the precarpet to the desired level
aCn = gc.Graph()
aCn_plus_one = aC0
copyOfACn = gc.Graph()
# listOfContractionParameters[i][0] is the scaleX
# listOfContractionParameters[i][1] is scaleY
# listOfContractionParameters[i][2] is fixedPoint
listOfContractionParameters = [[sideOfSmallSquares, sideOfSmallSquares, np.array([0, 0])], # q0
[sideOfCenterHole, sideOfSmallSquares, np.array([0.5, 0])], # q1
[sideOfSmallSquares, sideOfSmallSquares, np.array([1, 0])], # q2
[sideOfSmallSquares, sideOfCenterHole, np.array([1, 0.5])], # q3
[sideOfSmallSquares, sideOfSmallSquares, np.array([1, 1])], # q4
[sideOfCenterHole, sideOfSmallSquares, np.array([0.5, 1])], # q5
[sideOfSmallSquares, sideOfSmallSquares, np.array([0, 1])], # q6
[sideOfSmallSquares, sideOfCenterHole, np.array([0, 0.5])]] # q7
# carpet building loop
for k in range(precarpet_level):
aCn = copy.deepcopy(aCn_plus_one)
aCn_plus_one = gc.Graph()
for i in range(0, 8):
copyOfACn = copy.deepcopy(aCn)
copyOfACn.update_all_vertices_names(str(i))
copyOfACn.contract_graph_affine(listOfContractionParameters[i][0], listOfContractionParameters[i][1],
listOfContractionParameters[i][2])
aCn_plus_one.add_graph(copyOfACn)
aCn_plus_one.remove_redundancies()
# applying harmonic function
aCn_plus_one.apply_harmonic_function_affine(numRuns=numRuns)
# writing file
file.write(str(numRuns) + "runs\n")
if kindOfGraph == '+': # the cross graph data format
for v in aCn_plus_one.vertices:
if v[0] == 'e':
# parameters are as follows: position, displacement, and harmonic values
# position: where the center of the graph is
# displacement: each center has 4 neighbors perpendicular to them, so the first value is top/bottom
# displacement and second is left/right
# harmonic values: the harmonic values of the point above, below, left, and right neighbors of center
savedParameters = [[None, None], [None, None], [None, None, None, None]]
savedParameters[0][0] = str(aCn_plus_one.vertices[v][1][0])
savedParameters[0][1] = str(aCn_plus_one.vertices[v][1][1])
for n in aCn_plus_one.vertices[v][0]:
distance = aCn_plus_one.vertices[v][1] - aCn_plus_one.vertices[n][1]
if distance[0] == 0: # must be top or bottom, as the x values were the same
if distance[1] < 0: # must be top, as the neighbor has a higher y value
savedParameters[2][0] = str(aCn_plus_one.vertices[n][2])
elif distance[1] > 0: # must be bottom, as the neighbor has a lower y value
savedParameters[1][0] = str(distance[1])
savedParameters[2][1] = str(aCn_plus_one.vertices[n][2])
elif distance[1] == 0: # must be left or right, as the y values were the same
if distance[0] < 0: # must be right, as the neighbor has a higher x value
savedParameters[2][3] = str(aCn_plus_one.vertices[n][2]) + '\n'
elif distance[0] > 0: # must be left, as the neighbor has a lower x value
savedParameters[1][1] = str(distance[0])
savedParameters[2][2] = str(aCn_plus_one.vertices[n][2])
listOfParameters = []
seperator = '|'
for parameter in savedParameters:
for e in parameter:
listOfParameters.append(e)
file.write(seperator.join(listOfParameters))
else: # the x graph data format
for v in aCn_plus_one.vertices:
if v[0] == 'e':
# parameters are as follows: position, displacement, and harmonic values
# position: where the center of the graph is
# displacement: how far each point is from the center (they're all the same displacement, just different
# signs in where; first number is horizontal, second is vertical
# harmonic values: the harmonic values of the point top left, bottom left, bottom right, and top right
# neighbors of center
savedParameters = [[None, None], [None, None], [None, None, None, None]]
savedParameters[0][0] = str(aCn_plus_one.vertices[v][1][0])
savedParameters[0][1] = str(aCn_plus_one.vertices[v][1][1])
neigh = aCn_plus_one.vertices[v][0][0] # doesn't matter which neighbor used
distance = aCn_plus_one.vertices[v][1] - aCn_plus_one.vertices[neigh][1]
savedParameters[1][0] = abs(distance[0])
savedParameters[1][1] = abs(distance[1])
for n in aCn_plus_one.vertices[v][0]:
disN = aCn_plus_one.vertices[n][1] - aCn_plus_one.vertices[v][1]
if disN[0] < 0: # must be on left
if disN[1] > 0: # must be top left
savedParameters[2][0] = aCn_plus_one.vertices[n][2]
else: # bottom left
savedParameters[2][1] = aCn_plus_one.vertices[n][2]
else: # must be on right
if disN[1] > 0: # must be top right
savedParameters[2][3] = aCn_plus_one.vertices[n][2]
else: # bottom right
savedParameters[2][2] = aCn_plus_one.vertices[n][2]
listOfParameters = []
seperator = '|'
for parameter in savedParameters:
for e in parameter:
listOfParameters.append(str(e))
listOfParameters[-1] = listOfParameters[-1] + '\n'
file.write(seperator.join(listOfParameters))
else:
# open data for carpet of lower level
prevLevel = 'level' + str(precarpet_level - 1)
prevFilePath = kogString + "/" + typeOfCarpet + "/" + prevLevel + '.txt'
if not p.isfile(prevFilePath):
print('You need to generate the carpet of the previous level first')
exit()
prevFile = open(prevFilePath, 'r')
lowerCarpetData = prevFile.readlines()
del lowerCarpetData[0]
# iterate through the data and build the graph with correct harmonic functions
aCn_plus_one = gc.Graph()
i = 0
if kindOfGraph == '+':
for line in lowerCarpetData:
# create graph and list of data
aCn = gc.Graph()
parameters = list(line.split("|"))
parameters = [float(j) for j in parameters]
# data from line
centerPosition = np.array(parameters[0:2])
vertDisplacement = parameters[2]
horizontalDisplacement = parameters[3]
topHarmonic = parameters[4]
bottHarmonic = parameters[5]
leftHarmonic = parameters[6]
rightHarmonic = parameters[7]
avgHarmonic = sum(parameters[4:]) / 4
# convert to data for making graph
# each point is in a certain vertical (v) and horizontal (h) file
v1 = centerPosition[0] - (sideOfCenterHole + 2 * sideOfSmallSquares) * horizontalDisplacement
v2 = centerPosition[0] - (sideOfCenterHole + sideOfSmallSquares) * horizontalDisplacement
v3 = centerPosition[0] - sideOfCenterHole * horizontalDisplacement
v4 = centerPosition[0]
v5 = centerPosition[0] + sideOfCenterHole * horizontalDisplacement
v6 = centerPosition[0] + (sideOfCenterHole + sideOfSmallSquares) * horizontalDisplacement
v7 = centerPosition[0] + (sideOfCenterHole + 2 * sideOfSmallSquares) * horizontalDisplacement
h1 = centerPosition[1] - (sideOfCenterHole + 2 * sideOfSmallSquares) * vertDisplacement
h2 = centerPosition[1] - (sideOfCenterHole + sideOfSmallSquares) * vertDisplacement
h3 = centerPosition[1] - sideOfCenterHole * vertDisplacement
h4 = centerPosition[1]
h5 = centerPosition[1] + sideOfCenterHole * vertDisplacement
h6 = centerPosition[1] + (sideOfCenterHole + sideOfSmallSquares) * vertDisplacement
h7 = centerPosition[1] + (sideOfCenterHole + 2 * sideOfSmallSquares) * vertDisplacement
# building the graph; points are named from left to right, top to bottom
p1 = 'a' + str(i)
p2 = 'b' + str(i)
p3 = 'c' + str(i)
p4 = 'd' + str(i)
p5 = 'e' + str(i)
p6 = 'f' + str(i)
p7 = 'g' + str(i)
p8 = 'h' + str(i)
p9 = 'i' + str(i)
p10 = 'j' + str(i)
p11 = 'k' + str(i)
p12 = 'l' + str(i)
p13 = 'm' + str(i)
p14 = 'n' + str(i)
p15 = 'o' + str(i)
p16 = 'p' + str(i)
p17 = 'q' + str(i)
p18 = 'r' + str(i)
p19 = 's' + str(i)
p20 = 't' + str(i)
p21 = 'u' + str(i)
p22 = 'v' + str(i)
p23 = 'w' + str(i)
p24 = 'x' + str(i)
p25 = 'y' + str(i)
p26 = 'z' + str(i)
p27 = 'A' + str(i)
p28 = 'B' + str(i)
p29 = 'C' + str(i)
p30 = 'D' + str(i)
p31 = 'E' + str(i)
p32 = 'F' + str(i)
aCn.add_vertex(p1, np.array([v2, h7]))
aCn.add_vertex(p2, np.array([v4, h7]))
aCn.add_vertex(p3, np.array([v6, h7]))
aCn.add_vertex(p4, np.array([v1, h6]))
aCn.add_vertex(p5, np.array([v2, h6]))
aCn.add_vertex(p6, np.array([v3, h6]))
aCn.add_vertex(p7, np.array([v4, h6]))
aCn.add_vertex(p8, np.array([v5, h6]))
aCn.add_vertex(p9, np.array([v6, h6]))
aCn.add_vertex(p10, np.array([v7, h6]))
aCn.add_vertex(p11, np.array([v2, h5]))
aCn.add_vertex(p12, np.array([v4, h5]))
aCn.add_vertex(p13, np.array([v6, h5]))
aCn.add_vertex(p14, np.array([v1, h4]))
aCn.add_vertex(p15, np.array([v2, h4]))
aCn.add_vertex(p16, np.array([v3, h4]))
aCn.add_vertex(p17, np.array([v5, h4]))
aCn.add_vertex(p18, np.array([v6, h4]))
aCn.add_vertex(p19, np.array([v7, h4]))
aCn.add_vertex(p20, np.array([v2, h3]))
aCn.add_vertex(p21, np.array([v4, h3]))
aCn.add_vertex(p22, np.array([v6, h3]))
aCn.add_vertex(p23, np.array([v1, h2]))
aCn.add_vertex(p24, np.array([v2, h2]))
aCn.add_vertex(p25, np.array([v3, h2]))
aCn.add_vertex(p26, np.array([v4, h2]))
aCn.add_vertex(p27, np.array([v5, h2]))
aCn.add_vertex(p28, np.array([v6, h2]))
aCn.add_vertex(p29, np.array([v7, h2]))
aCn.add_vertex(p30, np.array([v2, h1]))
aCn.add_vertex(p31, np.array([v4, h1]))
aCn.add_vertex(p32, np.array([v6, h1]))
aCn.add_edge(p1, p5)
aCn.add_edge(p2, p7)
aCn.add_edge(p3, p9)
aCn.add_edge(p4, p5)
aCn.add_edge(p5, p6)
aCn.add_edge(p5, p11)
aCn.add_edge(p6, p7)
aCn.add_edge(p7, p8)
aCn.add_edge(p7, p12)
aCn.add_edge(p8, p9)
aCn.add_edge(p9, p10)
aCn.add_edge(p9, p13)
aCn.add_edge(p11, p15)
aCn.add_edge(p13, p18)
aCn.add_edge(p14, p15)
aCn.add_edge(p15, p16)
aCn.add_edge(p17, p18)
aCn.add_edge(p18, p19)
aCn.add_edge(p15, p20)
aCn.add_edge(p18, p22)
aCn.add_edge(p20, p24)
aCn.add_edge(p22, p28)
aCn.add_edge(p23, p24)
aCn.add_edge(p24, p25)
aCn.add_edge(p24, p30)
aCn.add_edge(p25, p26)
aCn.add_edge(p21, p26)
aCn.add_edge(p26, p31)
aCn.add_edge(p26, p27)
aCn.add_edge(p27, p28)
aCn.add_edge(p28, p29)
aCn.add_edge(p28, p32)
# applying harmonic
# this part could be hard coded if it causes performance issues
for v in aCn.vertices:
xpos = aCn.vertices[v][1][0]
ypos = aCn.vertices[v][1][1]
if ypos == h7:
aCn.vertices[v][2] = topHarmonic
elif ypos == h1:
aCn.vertices[v][2] = bottHarmonic
elif xpos == v1:
aCn.vertices[v][2] = leftHarmonic
elif xpos == v7:
aCn.vertices[v][2] = rightHarmonic
else:
aCn.vertices[v][2] = avgHarmonic
# add created graph to the final graph
aCn_plus_one.add_graph(aCn)
i += 1
# remove redundancies and apply harmonic
aCn_plus_one.remove_redundancies()
aCn_plus_one.apply_harmonic_function_affine(numRuns=numRuns, setInitialValues=False)
file.write(str(numRuns) + "runs\n")
for v in aCn_plus_one.vertices:
if len(aCn_plus_one.vertices[v][0]) == 4:
# parameters are as follows: position, displacement, and harmonic values
# position: where the center of the graph is
# displacement: each center has 4 neighbors perpendicular to them, so the first value is top/bottom
# displacement and second is left/right
# harmonic values: the harmonic values of the point above, below, left, and right neighbors of center
savedParameters = [[None, None], [None, None], [None, None, None, None]]
savedParameters[0][0] = str(aCn_plus_one.vertices[v][1][0])
savedParameters[0][1] = str(aCn_plus_one.vertices[v][1][1])
for n in aCn_plus_one.vertices[v][0]:
distance = aCn_plus_one.vertices[v][1] - aCn_plus_one.vertices[n][1]
if np.isclose(distance[0], 0.0):
distance[0] = 0.0
if np.isclose(distance[1], 0.0):
distance[1] = 0.0
if distance[0] == 0: # must be top or bottom, as the x values were the same
if distance[1] < 0: # must be top, as the neighbor has a higher y value
savedParameters[2][0] = str(aCn_plus_one.vertices[n][2])
elif distance[1] > 0: # must be bottom, as the neighbor has a lower y value
savedParameters[1][0] = str(distance[1])
savedParameters[2][1] = str(aCn_plus_one.vertices[n][2])
elif distance[1] == 0: # must be left or right, as the y values were the same
if distance[0] < 0: # must be right, as the neighbor has a higher x value
savedParameters[2][3] = str(aCn_plus_one.vertices[n][2]) + '\n'
elif distance[0] > 0: # must be left, as the neighbor has a lower x value
savedParameters[1][1] = str(distance[0])
savedParameters[2][2] = str(aCn_plus_one.vertices[n][2])
listOfParameters = []
seperator = '|'
for parameter in savedParameters:
for e in parameter:
listOfParameters.append(e)
file.write(seperator.join(listOfParameters))
else: # code for x graph
for line in lowerCarpetData:
# create graph and list of data
aCn = gc.Graph()
parameters = list(line.split("|"))
parameters = [float(j) for j in parameters]
# data from line
centerPosition = np.array(parameters[0:2])
vertDisplacement = parameters[3]
horizontalDisplacement = parameters[2]
tlHarmonic = parameters[4]
blHarmonic = parameters[5]
brHarmonic = parameters[6]
trHarmonic = parameters[7]
avgHarmonic = sum(parameters[4:]) / 4
tHarmonic = (tlHarmonic + trHarmonic)/2
bHarmonic = (blHarmonic + brHarmonic)/2
rHarmonic = (trHarmonic + brHarmonic)/2
lHarmonic = (tlHarmonic + blHarmonic)/2
# convert to data for making graph
# each point is in a certain vertical (v) and horizontal (h) file
v1 = centerPosition[0] - (sideOfCenterHole + 2 * sideOfSmallSquares) * horizontalDisplacement
v2 = centerPosition[0] - (sideOfCenterHole + sideOfSmallSquares) * horizontalDisplacement
v3 = centerPosition[0] - sideOfCenterHole * horizontalDisplacement
v4 = centerPosition[0]
v5 = centerPosition[0] + sideOfCenterHole * horizontalDisplacement
v6 = centerPosition[0] + (sideOfCenterHole + sideOfSmallSquares) * horizontalDisplacement
v7 = centerPosition[0] + (sideOfCenterHole + 2 * sideOfSmallSquares) * horizontalDisplacement
h1 = centerPosition[1] - (sideOfCenterHole + 2 * sideOfSmallSquares) * vertDisplacement
h2 = centerPosition[1] - (sideOfCenterHole + sideOfSmallSquares) * vertDisplacement
h3 = centerPosition[1] - sideOfCenterHole * vertDisplacement
h4 = centerPosition[1]
h5 = centerPosition[1] + sideOfCenterHole * vertDisplacement
h6 = centerPosition[1] + (sideOfCenterHole + sideOfSmallSquares) * vertDisplacement
h7 = centerPosition[1] + (sideOfCenterHole + 2 * sideOfSmallSquares) * vertDisplacement
# building the graph
a = 'a' + str(i)
b = 'b' + str(i)
c = 'c' + str(i)
d = 'd' + str(i)
e = 'ec' + str(i)
f = 'fc' + str(i)
g = 'gc' + str(i)
h = 'h' + str(i)
ii = 'i' + str(i)
j = 'j' + str(i)
k = 'k' + str(i)
l = 'lc' + str(i)
m = 'mc' + str(i)
n = 'n' + str(i)
o = 'o' + str(i)
p = 'p' + str(i)
q = 'q' + str(i)
r = 'rc' + str(i)
s = 'sc' + str(i)
t = 'tc' + str(i)
u = 'u' + str(i)
v = 'v' + str(i)
w = 'w' + str(i)
x = 'x' + str(i)
aCn.add_vertex(a, np.array([v1, h7]))
aCn.add_vertex(b, np.array([v3, h7]))
aCn.add_vertex(c, np.array([v5, h7]))
aCn.add_vertex(d, np.array([v7, h7]))
aCn.add_vertex(e, np.array([v2, h6]))
aCn.add_vertex(f, np.array([v4, h6]))
aCn.add_vertex(g, np.array([v6, h6]))
aCn.add_vertex(h, np.array([v1, h5]))
aCn.add_vertex(ii, np.array([v3, h5]))
aCn.add_vertex(j, np.array([v5, h5]))
aCn.add_vertex(k, np.array([v7, h5]))
aCn.add_vertex(l, np.array([v2, h4]))
aCn.add_vertex(m, np.array([v6, h4]))
aCn.add_vertex(n, np.array([v1, h3]))
aCn.add_vertex(o, np.array([v3, h3]))
aCn.add_vertex(p, np.array([v5, h3]))
aCn.add_vertex(q, np.array([v7, h3]))
aCn.add_vertex(r, np.array([v2, h2]))
aCn.add_vertex(s, np.array([v4, h2]))
aCn.add_vertex(t, np.array([v6, h2]))
aCn.add_vertex(u, np.array([v1, h1]))
aCn.add_vertex(v, np.array([v3, h1]))
aCn.add_vertex(w, np.array([v5, h1]))
aCn.add_vertex(x, np.array([v7, h1]))
aCn.add_edge(a, e)
aCn.add_edge(b, e)
aCn.add_edge(h, e)
aCn.add_edge(ii, e)
aCn.add_edge(b, f)
aCn.add_edge(c, f)
aCn.add_edge(ii, f)
aCn.add_edge(j, f)
aCn.add_edge(c, g)
aCn.add_edge(d, g)
aCn.add_edge(j, g)
aCn.add_edge(k, g)
aCn.add_edge(h, l)
aCn.add_edge(ii, l)
aCn.add_edge(n, l)
aCn.add_edge(o, l)
aCn.add_edge(j, m)
aCn.add_edge(k, m)
aCn.add_edge(p, m)
aCn.add_edge(q, m)
aCn.add_edge(n, r)
aCn.add_edge(o, r)
aCn.add_edge(u, r)
aCn.add_edge(v, r)
aCn.add_edge(o, s)
aCn.add_edge(v, s)
aCn.add_edge(p, s)
aCn.add_edge(w, s)
aCn.add_edge(p, t)
aCn.add_edge(w, t)
aCn.add_edge(q, t)
aCn.add_edge(x, t)
# applying harmonic
# this part could be hard coded if it causes performance issues
for v in aCn.vertices:
xpos = aCn.vertices[v][1][0]
ypos = aCn.vertices[v][1][1]
if ypos == h7:
if xpos == v1:
aCn.vertices[v][2] = tlHarmonic
elif xpos == v7:
aCn.vertices[v][2] = trHarmonic
else:
aCn.vertices[v][2] = tHarmonic
elif ypos == h1:
if xpos == v1:
aCn.vertices[v][2] = blHarmonic
elif xpos == v7:
aCn.vertices[v][2] = brHarmonic
else:
aCn.vertices[v][2] = bHarmonic
elif xpos == v1:
aCn.vertices[v][2] = lHarmonic
elif xpos == v7:
aCn.vertices[v][2] = rHarmonic
else:
aCn.vertices[v][2] = avgHarmonic
# add created graph to the final graph
aCn_plus_one.add_graph(aCn)
i += 1
# remove redundancies and apply harmonic
aCn_plus_one.remove_redundancies()
aCn_plus_one.apply_harmonic_function_affine(numRuns=numRuns, setInitialValues=False)
file.write(str(numRuns) + "runs\n")
for v in aCn_plus_one.vertices:
if v[1] == 'c':
# parameters are as follows: position, displacement, and harmonic values
# position: where the center of the graph is
# displacement: how far each point is from the center (they're all the same displacement, just different
# signs in where; first number is horizontal, second is vertical
# harmonic values: the harmonic values of the point top left, bottom left, bottom right, and top right
# neighbors of center
savedParameters = [[None, None], [None, None], [None, None, None, None]]
savedParameters[0][0] = str(aCn_plus_one.vertices[v][1][0])
savedParameters[0][1] = str(aCn_plus_one.vertices[v][1][1])
neigh = aCn_plus_one.vertices[v][0][0] # doesn't matter which neighbor used
distance = aCn_plus_one.vertices[v][1] - aCn_plus_one.vertices[neigh][1]
savedParameters[1][0] = abs(distance[0])
savedParameters[1][1] = abs(distance[1])
for n in aCn_plus_one.vertices[v][0]:
disN = aCn_plus_one.vertices[n][1] - aCn_plus_one.vertices[v][1]
if np.isclose(disN[0], 0.0):
disN[0] = 0.0
if np.isclose(disN[1], 0.0):
disN[1] = 0.0
if disN[0] < 0: # must be on left
if disN[1] > 0: # must be top left
savedParameters[2][0] = aCn_plus_one.vertices[n][2]
else: # bottom left
savedParameters[2][1] = aCn_plus_one.vertices[n][2]
else: # must be on right
if disN[1] > 0: # must be top right
savedParameters[2][3] = aCn_plus_one.vertices[n][2]
else: # bottom right
savedParameters[2][2] = aCn_plus_one.vertices[n][2]
listOfParameters = []
seperator = '|'
for parameter in savedParameters:
for e in parameter:
listOfParameters.append(str(e))
listOfParameters[-1] = listOfParameters[-1] + '\n'
file.write(seperator.join(listOfParameters))
print("Program took ", time.time() - starttime)
| 2.96875
| 3
|
scripts/estimate_median_freq.py
|
rocksat/jsis3d
| 180
|
12777846
|
import os
import sys
import h5py
import argparse
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('--root', help='path to root directory')
args = parser.parse_args()
root = args.root
fname = os.path.join(root, 'metadata/train.txt')
flist = [os.path.join(root, 'h5', line.strip())
for line in open(fname, 'r')]
fname = os.path.join(root, 'metadata', 'classes.txt')
classes = [line.strip() for line in open(fname, 'r')]
num_classes = len(classes)
sizes = np.zeros(num_classes)
total = np.zeros(num_classes)
for fname in flist:
print('> Processing {}...'.format(fname))
fin = h5py.File(fname, 'r')
coords = fin['coords'][:]
points = fin['points'][:]
labels = fin['labels'][:]
labels = labels.reshape(-1, 2)
num_points = labels.shape[0]
for i in range(num_classes):
indices = (labels[:, 0] == i)
size = np.sum(indices)
sizes[i] += size
if size == 0: continue
total[i] += num_points
freq = sizes / total
weight = np.median(freq) / freq
fname = os.path.join(root, 'metadata', 'weight.txt')
print('> Saving statistics to {}...'.format(fname))
np.savetxt(fname, weight, fmt='%f')
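# Toy illustration of the weighting rule above (hedged, made-up numbers):
# with per-class frequencies freq = [0.5, 0.1, 0.4], the median is 0.4, so
# median / freq = [0.8, 4.0, 1.0] -- rarer classes get larger loss weights.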
| 2.28125
| 2
|
leetcode/longest_substring.py
|
paulsok/python_tricks
| 0
|
12777847
|
<filename>leetcode/longest_substring.py<gh_stars>0
# Input: s = "abcabcbb"
# Output: 3
# Explanation: The answer is "abc", with the length of 3.
# naive
# class Solution:
# def check(s, start, end):
# chars = [0] * 128
# for i in range(start, end + 1):
# c = s[i]
# chars[ord(c)] += 1
# if chars[ord(c)] > 1:
# return False
# return True
# def lengthOfLongestSubstring(self, s: str) -> int:
# n = len(s)
# res = 0
# for i in range(n):
# for j in range(i, n):
# if check(s, i, j):
# res = max(res, j - i + 1)
# return res
# sliding window
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
n = len(s)
ans = 0
# mp stores the current index of a character
mp = {}
i = 0
# try to extend the range [i, j]
for j in range(n):
if s[j] in mp:
i = max(mp[s[j]], i)
ans = max(ans, j - i + 1)
mp[s[j]] = j + 1
return ans
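# Quick sanity check of the sliding-window solution above (strings are standard
# examples for this problem; the check itself is not part of the original snippet).
if __name__ == "__main__":
    solver = Solution()
    print(solver.lengthOfLongestSubstring("abcabcbb"))  # 3 ("abc")
    print(solver.lengthOfLongestSubstring("bbbbb"))      # 1 ("b")
    print(solver.lengthOfLongestSubstring("pwwkew"))     # 3 ("wke")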
| 3.65625
| 4
|
third_party/blink/renderer/build/scripts/blinkbuild/name_style_converter_test.py
|
zipated/src
| 2,151
|
12777848
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=import-error,print-statement,relative-import,protected-access
"""Unit tests for name_style_converter.py."""
import unittest
from name_style_converter import NameStyleConverter
from name_style_converter import tokenize_name
class SmartTokenizerTest(unittest.TestCase):
def test_simple_cases(self):
self.assertEqual(tokenize_name('foo'), ['foo'])
self.assertEqual(tokenize_name('fooBar'), ['foo', 'Bar'])
self.assertEqual(tokenize_name('fooBarBaz'), ['foo', 'Bar', 'Baz'])
self.assertEqual(tokenize_name('Baz'), ['Baz'])
self.assertEqual(tokenize_name(''), [])
self.assertEqual(tokenize_name('FOO'), ['FOO'])
self.assertEqual(tokenize_name('foo2'), ['foo', '2'])
def test_tricky_cases(self):
self.assertEqual(tokenize_name('XMLHttpRequest'), ['XML', 'Http', 'Request'])
self.assertEqual(tokenize_name('HTMLElement'), ['HTML', 'Element'])
self.assertEqual(tokenize_name('WebGLRenderingContext'),
['WebGL', 'Rendering', 'Context'])
self.assertEqual(tokenize_name('CanvasRenderingContext2D'),
['Canvas', 'Rendering', 'Context', '2D'])
self.assertEqual(tokenize_name('CanvasRenderingContext2DAPITest'),
['Canvas', 'Rendering', 'Context', '2D', 'API', 'Test'])
self.assertEqual(tokenize_name('SVGSVGElement'), ['SVG', 'SVG', 'Element'])
self.assertEqual(tokenize_name('CSSURLImageValue'), ['CSS', 'URL', 'Image', 'Value'])
self.assertEqual(tokenize_name('CSSPropertyAPID'), ['CSS', 'Property', 'API', 'D'])
self.assertEqual(tokenize_name('AXARIAGridCell'), ['AX', 'ARIA', 'Grid', 'Cell'])
self.assertEqual(tokenize_name('CDATASection'), ['CDATA', 'Section'])
self.assertEqual(tokenize_name('ASCIICType'), ['ASCII', 'CType'])
self.assertEqual(tokenize_name('CString'), ['CString'])
self.assertEqual(tokenize_name('HTMLDListElement'), ['HTML', 'DList', 'Element'])
self.assertEqual(tokenize_name('HTMLOListElement'), ['HTML', 'OList', 'Element'])
self.assertEqual(tokenize_name('HTMLIFrameElement'), ['HTML', 'IFrame', 'Element'])
self.assertEqual(tokenize_name('HTMLPlugInElement'), ['HTML', 'PlugIn', 'Element'])
# No special handling for OptGroup, FieldSet, and TextArea.
self.assertEqual(tokenize_name('HTMLOptGroupElement'), ['HTML', 'Opt', 'Group', 'Element'])
self.assertEqual(tokenize_name('HTMLFieldSetElement'), ['HTML', 'Field', 'Set', 'Element'])
self.assertEqual(tokenize_name('HTMLTextAreaElement'), ['HTML', 'Text', 'Area', 'Element'])
self.assertEqual(tokenize_name('Path2D'), ['Path', '2D'])
self.assertEqual(tokenize_name('Point2D'), ['Point', '2D'])
self.assertEqual(tokenize_name('CanvasRenderingContext2DState'),
['Canvas', 'Rendering', 'Context', '2D', 'State'])
self.assertEqual(tokenize_name('Accelerated2dCanvas'), ['Accelerated', '2d', 'Canvas'])
self.assertEqual(tokenize_name('RTCDTMFSender'), ['RTC', 'DTMF', 'Sender'])
self.assertEqual(tokenize_name('WebGLCompressedTextureS3TCsRGB'),
['WebGL', 'Compressed', 'Texture', 'S3TC', 'sRGB'])
self.assertEqual(tokenize_name('WebGL2CompressedTextureETC1'),
['WebGL2', 'Compressed', 'Texture', 'ETC1'])
self.assertEqual(tokenize_name('EXTsRGB'), ['EXT', 'sRGB'])
# 'PVRTC' contains a special token 'RTC', but it should be a
# single token.
self.assertEqual(tokenize_name('WebGLCompressedTexturePVRTC'),
['WebGL', 'Compressed', 'Texture', 'PVRTC'])
self.assertEqual(tokenize_name('SVGFEBlendElement'), ['SVG', 'FE', 'Blend', 'Element'])
self.assertEqual(tokenize_name('SVGMPathElement'), ['SVG', 'MPath', 'Element'])
self.assertEqual(tokenize_name('SVGTSpanElement'), ['SVG', 'TSpan', 'Element'])
self.assertEqual(tokenize_name('SVGURIReference'), ['SVG', 'URI', 'Reference'])
self.assertEqual(tokenize_name('UTF16TextIterator'), ['UTF16', 'Text', 'Iterator'])
self.assertEqual(tokenize_name('UTF8Decoder'), ['UTF8', 'Decoder'])
self.assertEqual(tokenize_name('Uint8Array'), ['Uint8', 'Array'])
self.assertEqual(tokenize_name('DOMWindowBase64'), ['DOM', 'Window', 'Base64'])
self.assertEqual(tokenize_name('TextCodecLatin1'), ['Text', 'Codec', 'Latin1'])
self.assertEqual(tokenize_name('V8BindingForCore'), ['V8', 'Binding', 'For', 'Core'])
self.assertEqual(tokenize_name('V8DOMRect'), ['V8', 'DOM', 'Rect'])
self.assertEqual(tokenize_name('String16MojomTraits'), ['String16', 'Mojom', 'Traits'])
self.assertEqual(tokenize_name('V0InsertionPoint'), ['V0', 'Insertion', 'Point'])
self.assertEqual(tokenize_name('ShadowDOMV0Test'), ['Shadow', 'DOM', 'V0', 'Test'])
self.assertEqual(tokenize_name('ElementShadowV0'), ['Element', 'Shadow', 'V0'])
self.assertEqual(tokenize_name('StubChromeClientForSPv2'),
['Stub', 'Chrome', 'Client', 'For', 'SPv2'])
self.assertEqual(tokenize_name('SQLiteAuthorizer'), ['SQLite', 'Authorizer'])
self.assertEqual(tokenize_name('XPathEvaluator'), ['XPath', 'Evaluator'])
self.assertEqual(tokenize_name('IsXHTMLDocument'), ['Is', 'XHTML', 'Document'])
self.assertEqual(tokenize_name('isHTMLDocument'), ['is', 'HTML', 'Document'])
self.assertEqual(tokenize_name('matrix3d'), ['matrix', '3d'])
def test_ignoring_characters(self):
self.assertEqual(tokenize_name('Animation.idl'), ['Animation', 'idl'])
self.assertEqual(tokenize_name('-webkit-appearance'), ['webkit', 'appearance'])
self.assertEqual(tokenize_name(' foo_bar!#"$'), ['foo', 'bar'])
class NameStyleConverterTest(unittest.TestCase):
def test_snake_case(self):
converter = NameStyleConverter('HTMLElement')
self.assertEqual(converter.to_snake_case(), 'html_element')
def test_upper_camel_case(self):
converter = NameStyleConverter('someSuperThing')
self.assertEqual(converter.to_upper_camel_case(), 'SomeSuperThing')
converter = NameStyleConverter('SVGElement')
self.assertEqual(converter.to_upper_camel_case(), 'SVGElement')
converter = NameStyleConverter('cssExternalScannerPreload')
self.assertEqual(converter.to_upper_camel_case(), 'CSSExternalScannerPreload')
converter = NameStyleConverter('xpathExpression')
self.assertEqual(converter.to_upper_camel_case(), 'XPathExpression')
converter = NameStyleConverter('feDropShadow')
self.assertEqual(converter.to_upper_camel_case(), 'FEDropShadow')
def test_lower_camel_case(self):
converter = NameStyleConverter('someSuperThing')
self.assertEqual(converter.to_lower_camel_case(), 'someSuperThing')
converter = NameStyleConverter('SVGElement')
self.assertEqual(converter.to_lower_camel_case(), 'svgElement')
converter = NameStyleConverter('documentURI')
self.assertEqual(converter.to_lower_camel_case(), 'documentURI')
converter = NameStyleConverter('-webkit-margin-start')
self.assertEqual(converter.to_lower_camel_case(), 'webkitMarginStart')
converter = NameStyleConverter('Accelerated2dCanvas')
self.assertEqual(converter.to_lower_camel_case(), 'accelerated2dCanvas')
def test_macro_case(self):
converter = NameStyleConverter('WebGLBaz2D')
self.assertEqual(converter.to_macro_case(), 'WEBGL_BAZ_2D')
def test_all_cases(self):
converter = NameStyleConverter('SVGScriptElement')
self.assertEqual(converter.to_all_cases(), {
'snake_case': 'svg_script_element',
'upper_camel_case': 'SVGScriptElement',
'macro_case': 'SVG_SCRIPT_ELEMENT',
})
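# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the Chromium test file): the case forms
# returned by to_all_cases() driving a trivial, purely illustrative loop.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    for name in ['HTMLElement', 'cssExternalScannerPreload']:
        cases = NameStyleConverter(name).to_all_cases()
        print('{macro_case}: {snake_case} / {upper_camel_case}'.format(**cases))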
| 2.203125
| 2
|
Seconds Converter.py
|
WOLF2503/Python-Program
| 0
|
12777849
|
<filename>Seconds Converter.py
a = int(input("Please input your time in seconds to convert: "))
selection = input("Please input the unit to convert to (S for Seconds, M for Minutes, H for Hours): ")
if selection == "S":
    print("Your time in seconds is", a)
elif selection == "M":
    b = a / 60
    print("Your time in minutes is", b)
elif selection == "H":
    c = a / 3600
    print("Your time in hours is", c)
else:
    print("Invalid input. Please check that you typed the unit correctly and try again. Note that the unit letters are CASE SENSITIVE.")
| 4.3125
| 4
|
camelot/view/controls/editors/booleditor.py
|
FrDeGraux/camelot
| 12
|
12777850
|
<filename>camelot/view/controls/editors/booleditor.py
# ============================================================================
#
# Copyright (C) 2007-2016 Conceptive Engineering bvba.
# www.conceptive.be / <EMAIL>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Conceptive Engineering nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ============================================================================
from ....core.qt import QtGui, QtCore, Qt, QtWidgets
from .customeditor import AbstractCustomEditor
from camelot.core import constants
from camelot.core.utils import ugettext
class BoolEditor(QtWidgets.QCheckBox, AbstractCustomEditor):
"""Widget for editing a boolean field"""
editingFinished = QtCore.qt_signal()
def __init__(self,
parent=None,
minimum=constants.camelot_minint,
maximum=constants.camelot_maxint,
nullable=True,
field_name = 'boolean',
**kwargs):
QtWidgets.QCheckBox.__init__(self, parent)
AbstractCustomEditor.__init__(self)
self.setObjectName( field_name )
self._nullable = nullable
self.clicked.connect( self._state_changed )
def set_value(self, value):
value = AbstractCustomEditor.set_value(self, value)
if value:
self.setCheckState(Qt.Checked)
else:
self.setCheckState(Qt.Unchecked)
def get_value(self):
value_loading = AbstractCustomEditor.get_value(self)
if value_loading is not None:
return value_loading
state = self.checkState()
if state==Qt.Unchecked:
return False
return True
def set_field_attributes( self, editable = True, **kwargs ):
AbstractCustomEditor.set_field_attributes( self, **kwargs )
self.setDisabled( not editable )
@QtCore.qt_slot( bool )
def _state_changed(self, value=None):
if not self.hasFocus():
"""
            Mac OS X's behaviour is not to move focus to a checkbox when its
state is changed. Therefore, the text_input_editing_finished method
will not be called on a TextLineEditor when a checkbox is clicked
after a text line has been changed, thus leading to a loss of the
changes made in the text line. This issue is resolved by forcing
the focus to the checkbox here.
"""
self.setFocus()
self.editingFinished.emit()
def sizeHint(self):
size = QtWidgets.QComboBox().sizeHint()
return size
class TextBoolEditor(QtWidgets.QLabel, AbstractCustomEditor):
"""
    :Parameters:
color_yes: string
text-color of the True representation
color_no: string
text-color of the False representation
"""
editingFinished = QtCore.qt_signal()
def __init__(self,
parent=None,
yes="Yes",
no="No",
color_yes=None,
color_no=None,
**kwargs):
QtWidgets.QLabel.__init__(self, parent)
AbstractCustomEditor.__init__(self)
self.setEnabled(False)
self.yes = ugettext(yes)
self.no = ugettext(no)
self.color_yes = color_yes
self.color_no = color_no
def set_value(self, value):
value = AbstractCustomEditor.set_value(self, value)
if value:
self.setText(self.yes)
if self.color_yes:
selfpalette = self.palette()
selfpalette.setColor(QtGui.QPalette.WindowText, self.color_yes)
self.setPalette(selfpalette)
else:
self.setText(self.no)
if self.color_no:
selfpalette = self.palette()
selfpalette.setColor(QtGui.QPalette.WindowText, self.color_no)
self.setPalette(selfpalette)
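# ---------------------------------------------------------------------------
# Hedged sketch (not from the Camelot sources): standalone use of BoolEditor.
# In practice these editors are constructed by Camelot's delegate machinery;
# the QApplication below exists only so a widget can be created at all.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    app = QtWidgets.QApplication([])
    editor = BoolEditor(field_name='is_active')
    editor.set_value(True)
    print(editor.get_value())  # expected: True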
| 1.125
| 1
|