content stringlengths 5 1.05M |
|---|
from datetime import datetime
from app import db
from app.models.base.base import BaseModel
class WithdrawOauthAccountModel(db.Model, BaseModel):
    """ORM model for a user's withdrawal OAuth account (table ``withdraw_oauth_account``)."""
    __bind_key__ = "a_coffer"  # secondary database bind
    __tablename__ = "withdraw_oauth_account"
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, default=0)        # owning user's id
    mobile = db.Column(db.String(15), default="")     # phone number bound to the account
    account = db.Column(db.String(20), default="")    # withdrawal account identifier
    nickname = db.Column(db.String(40), default="")   # display name on the OAuth account
    type = db.Column(db.Integer, default=0)           # OAuth provider type code
    openid = db.Column(db.String(40), default="")     # provider-issued open id
    status = db.Column(db.Integer, default=0)         # 1 appears to mean "active" (see query below)
    created_time = db.Column(db.DateTime, default=datetime.now)
    updated_time = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now)

    @staticmethod
    def query_withdraw_oauth_account(user_id, auth_type):
        """Return the first account with status == 1 for *user_id* and *auth_type*, or None."""
        result = WithdrawOauthAccountModel.query.filter_by(user_id=user_id, type=auth_type, status=1).first()
        return result
|
# SPDX-License-Identifier: MIT
# Copyright (c) 2020 The Pybricks Authors
"""
Hardware Module: 1
Description: Verifies the distance values of the Ultrasonic Sensor.
It rotates the motor to place an obstacle in front of the sensor to test
distance values. Then it rotates quickly to verify faster readings.
"""
from pybricks.pupdevices import Motor, UltrasonicSensor
from pybricks.parameters import Port
# Initialize devices.
motor = Motor(Port.A)
ultrasonic_sensor = UltrasonicSensor(Port.C)
# Detect object.
motor.run_target(500, 0)
distance = ultrasonic_sensor.distance()
assert distance < 100, "Expected < 100 mm, got {0}.".format(distance)
# Move object away.
motor.run_target(500, 180)
distance = ultrasonic_sensor.distance()
assert distance > 100, "Expected > 100 mm, got {0}.".format(distance)
# Prepare fast detection.
motor.reset_angle(0)
motor.run(700)
DETECTIONS = 5
# Wait for given number of detections.
for i in range(DETECTIONS):
# Wait for object to be detected.
while ultrasonic_sensor.distance() > 100:
pass
# Wait for object to move away.
angle_detected = motor.angle()
while motor.angle() < angle_detected + 180:
pass
# Assert that we have made as many turns.
rotations = round(motor.angle() / 360)
assert rotations == DETECTIONS, "Expected {0} turns, got {1}.".format(DETECTIONS, rotations)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# setup_teardown_test.py
# Note! run: pytest -v -s
# Class Example #
# Note: The setup class and teardown class methods
# have the @classmethod decorator applied, as they
# are passed in the uninstantiated class object
# rather than a unique instance of the class.
import pytest
class TestClass:
    """Demonstrates pytest class-level and per-method setup/teardown hooks."""

    @classmethod
    def setup_class(cls):
        # Runs once before any test in this class.
        print("Setup TestClass!")

    @classmethod
    def teardown_class(cls):
        # Runs once after all tests in this class have finished.
        print("Teardown TestClass!")

    def setup_method(self, method):
        # Runs before each test; *method* is the test method about to run.
        # NOTE(review): compares the hook argument against the bound methods
        # self.test1/self.test2 -- relies on method equality semantics and on
        # what the installed pytest version passes here; verify.
        if method == self.test1:
            print("\nSetting up test1!")
        elif method == self.test2:
            print("\nSetting up test2!")
        else:
            print("\nSetting up unknown test!")

    def teardown_method(self, method):
        # Runs after each test; mirrors setup_method.
        if method == self.test1:
            print("\nTearing down test1!")
        elif method == self.test2:
            print("\nTearing down test2!")
        else:
            print("\nTearing down unknown test!")

    def test1(self):
        print("Executing test1!")
        assert True

    def test2(self):
        print("Executing test2!")
        assert True
# Module Example #
def setup_module(module):
    """Module-level pytest setup hook; announces itself and echoes *module*."""
    message = "Setup Module!"
    print(message)
    return module
def teardown_module(module):
    """Module-level pytest teardown hook; announces itself and echoes *module*."""
    message = "Teardown Module!"
    print(message)
    return module
def setup_function(function):
    """Per-function pytest setup hook: print which test is being prepared."""
    # Guard-clause style: recognize the known tests, fall through otherwise.
    if function == test1:
        print("\nSetting up test1!")
        return
    if function == test2:
        print("\nSetting up test2!")
        return
    print("\nSetting up unknown test!")
def teardown_function(function):
    """Per-function pytest teardown hook: print which test is being torn down."""
    # Guard-clause style mirroring setup_function.
    if function == test1:
        print("\nTearing down test1!")
        return
    if function == test2:
        print("\nTearing down test2!")
        return
    print("\nTearing down unknown test!")
def test1():
    # Minimal passing test; exists to exercise the module-level hooks.
    print("Executing test1!")
    assert True
def test2():
    # Minimal passing test; exists to exercise the module-level hooks.
    print("Executing test2!")
    assert True
|
# -*- coding: utf-8 -*-
import cv2
import mediapipe as mp
import time
class PoseDetector:
    """Thin wrapper around MediaPipe Pose: detect a body pose in a BGR image
    and extract pixel-space landmark positions."""

    def __init__(self, mode = False, upBody = False, smooth=True, detectionCon = 0.5, trackCon = 0.5):
        # Configuration is forwarded positionally to mp.solutions.pose.Pose.
        self.mode = mode                  # static-image mode vs. video stream
        self.upBody = upBody              # upper-body-only model
        self.smooth = smooth              # smooth landmarks across frames
        self.detectionCon = detectionCon  # minimum detection confidence
        self.trackCon = trackCon          # minimum tracking confidence
        self.mpDraw = mp.solutions.drawing_utils
        self.mpPose = mp.solutions.pose
        self.pose = self.mpPose.Pose(self.mode, self.upBody, self.smooth, self.detectionCon, self.trackCon)

    def findPose(self, img, draw=True):
        """Run pose detection on BGR image *img*; optionally draw the skeleton.

        Returns (img, pose_landmarks, POSE_CONNECTIONS); pose_landmarks may be
        None when nothing was detected. Caches results on self for getPosition().
        """
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # MediaPipe expects RGB
        self.results = self.pose.process(imgRGB)
        if self.results.pose_landmarks:
            if draw:
                self.mpDraw.draw_landmarks(img, self.results.pose_landmarks, self.mpPose.POSE_CONNECTIONS)
        return img, self.results.pose_landmarks, self.mpPose.POSE_CONNECTIONS

    def getPosition(self, img, draw=True):
        """Return [id, x_px, y_px] for each landmark of the last findPose() call.

        Must be called after findPose() (reads self.results). Optionally draws
        a filled circle at each landmark.
        """
        lmList= []
        if self.results.pose_landmarks:
            for id, lm in enumerate(self.results.pose_landmarks.landmark):
                h, w, c = img.shape
                # landmark coords are normalized [0,1]; scale to pixels
                cx, cy = int(lm.x * w), int(lm.y * h)
                lmList.append([id, cx, cy])
                if draw:
                    cv2.circle(img, (cx, cy), 5, (255, 0, 0), cv2.FILLED)
        return lmList
# Smoke check: instantiating the detector loads the MediaPipe pose model.
pose = PoseDetector()
print(pose)
#coding=utf-8
import random
import jieba
import nltk
def run():
    """Placeholder entry point; intentionally does nothing yet."""
def gender_features(weibo):
    """Tokenize *weibo* (skipping its first character, the label) with jieba
    and return a feature dict mapping "1", "2", ... to successive tokens."""
    tokens = (" ".join(jieba.cut(weibo[1:]))).split()
    return {str(pos): tok for pos, tok in enumerate(tokens, start=1)}
def readin(path):
    """Read the text file at *path* and return its lines (newlines kept).

    Uses a context manager so the handle is closed even if reading raises.
    NOTE(review): no explicit encoding is given, matching the original
    behavior -- the platform default encoding is used.
    """
    with open(path, "r") as fin:
        return fin.readlines()
if __name__ == '__main__':
    # Ported from Python 2 to Python 3: print statements -> print(),
    # "except Exception, e" -> "except Exception", dict.keys() slicing ->
    # list(...), and str.decode dropped (readin() already yields text).
    print("main function")
    run()
    # Each line is "<label><text>"; the first character is the label.
    content = readin("sentimentweibo.txt")
    features = [(gender_features(line), line[0]) for line in content]
    random.shuffle(features)
    words = []
    stop_words = readin("stopwords.txt")
    stop_words = [w[:-1] for w in stop_words]  # drop the trailing newline
    for line in content:
        a = jieba.cut(line[1:])
        fl = (" ".join(a)).split()
        # keep only non-stopword tokens
        words = words + [w for w in fl if w not in stop_words]
    all_words = nltk.FreqDist(words)
    # py3: keys() is a view and cannot be sliced directly
    word_features = list(all_words.keys())[:1000]
    for i in all_words:
        try:
            print(i, all_words[i])
        except Exception:
            # best-effort console output; some terminals cannot encode CJK
            pass
    # Train/test split and NaiveBayes classification, left disabled:
    # rate = 0.75
    # train_set = features[:int(rate * len(features))]
    # test_set = features[int(rate * len(features)):]
    # classifier = nltk.NaiveBayesClassifier.train(train_set)
    # print(nltk.classify.accuracy(classifier, test_set))
    # classifier.show_most_informative_features(5)
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow import DAG
from airflow.providers.google.cloud.transfers import gcs_to_bigquery
# Default task arguments shared by every task in this DAG.
default_args = {
    "owner": "Google",
    "depends_on_past": False,
    "start_date": "2022-03-31",
}

with DAG(
    dag_id="gbif.gcs_to_bq",
    default_args=default_args,
    max_active_runs=1,
    schedule_interval="0 0 2 * *",  # monthly, on the 2nd at 00:00
    catchup=False,
    default_view="graph",
) as dag:
    # Load the monthly GBIF occurrence Parquet export from GCS into BigQuery,
    # replacing the destination table's contents on each run.
    load_parquet_files_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
        task_id="load_parquet_files_to_bq",
        bucket="{{ var.json.gbif.source_bucket }}",
        source_objects=[
            "occurrence/{{ execution_date.strftime('%Y-%m-01') }}/occurrence.parquet/*"
        ],
        source_format="PARQUET",
        destination_project_dataset_table="gbif.occurrences",
        write_disposition="WRITE_TRUNCATE",
    )

    # Single-task DAG; referencing the task sets no dependencies.
    load_parquet_files_to_bq
|
from django.forms import ModelForm
from .models import Order, Profile
class OrderForm(ModelForm):
    """ModelForm for creating/editing an Order (user, service, payment method)."""
    class Meta:
        model = Order
        fields = ['user','service','payment_method']
class ProfileForm(ModelForm):
    """ModelForm for creating/editing a user Profile."""
    class Meta:
        model = Profile
        fields = ['name', 'photo', 'age', 'email']
"""generate_user_tables
Revision ID: 47e4599484a1
Revises: 89edb69f4be3
Create Date: 2018-05-10 21:33:01.943092
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '47e4599484a1'       # this migration
down_revision = '89edb69f4be3'  # parent migration this one applies on top of
branch_labels = None
depends_on = None
def upgrade():
    """Add nullable email, password, and registration_at columns to sp_users."""
    new_columns = (
        sa.Column('email', sa.String(length=255), nullable=True),
        sa.Column('password', sa.String(length=255), nullable=True),
        sa.Column('registration_at', sa.DateTime(), nullable=True),
    )
    for column in new_columns:
        op.add_column('sp_users', column)
def downgrade():
    """Drop the columns added in upgrade(), in reverse order."""
    for name in ('registration_at', 'password', 'email'):
        op.drop_column('sp_users', name)
|
import pandas as pd
import numpy as np
import numpy.linalg as LA
from numpy import sin,cos
from tqdm import tqdm
from functools import partial
from multiprocessing import Pool, cpu_count
from cvxopt import matrix, solvers, sparse,spdiag,spmatrix
import time
# TODO
# 1. get rid of uniform timestamps assumption -> resample()
# 2. unit conversion
# 3. read from data_q
# 4. write to a modified data_q
# impute timesttamps from stitched trajectories
# implement receding_horizon_1d_l1
# Silence cvxopt's per-iteration solver output.
solvers.options['show_progress'] = False
# ==================== CVX optimization for 2d dynamics ==================
def resample(car):
    """Resample a vehicle document's time series to ~30 Hz.

    car: dict-like document with "timestamp", "x_position", "y_position"
    arrays. Gaps in the resampled grid become NaN rows. Mutates and
    returns *car*.

    NOTE(review): pd.DataFrame.resample requires a datetime-like index;
    this assumes "timestamp" holds datetime64-compatible values -- confirm
    against the upstream producer.
    """
    # Select time series only
    time_series_field = ["timestamp", "x_position", "y_position"]
    data = {key: car[key] for key in time_series_field}
    # Read to dataframe and resample on the timestamp index
    df = pd.DataFrame(data, columns=data.keys())
    df = df.set_index('timestamp')
    df = df.resample('0.033333333S').mean() # close to 30Hz
    # convert the datetime index back to epoch seconds (ns -> s)
    df.index = df.index.values.astype('datetime64[ns]').astype('int64')*1e-9
    car['x_position'] = df['x_position'].values
    car['y_position'] = df['y_position'].values
    car['timestamp'] = df.index.values
    return car
def _blocdiag(X, n):
    """
    makes diagonal blocs of X, for indices in [sub1,sub2]
    n indicates the total number of blocks (horizontally)

    Builds an (n-b+1) x n banded matrix whose rows are the 1 x b stencil X
    shifted one column right per row (i.e. a finite-difference operator).
    """
    if not isinstance(X, spmatrix):
        X = sparse(X)
    a,b = X.size
    if n==b:
        return X
    else:
        mat = []
        for i in range(n-b+1):
            # one row: zeros except the stencil starting at column i
            row = spmatrix([],[],[],(1,n))
            row[i:i+b]=matrix(X,(b,1))
            mat.append(row)
        return sparse(mat)
def _getQPMatrices(x, t, args, reg="l2"):
    '''
    turn ridge regression (reg=l2)
        1/M||y-Hx||_2^2 + \lam2/N ||Dx||_2^2
    and elastic net regression (reg=l1)
        1/M||y-Hx-e||_2^2 + \lam2/N ||Dx||_2^2 + \lam1/M||e||_1
    to QP form
        min 1/2 z^T Q x + p^T z + r    s.t. Gz <= h
    input:  x: data array with missing data
            t: array of timestamps (no missing)
    return: Q, p, H, N, M  (plus G, h if reg="l1")
    TODO: uneven timestamps

    NOTE(review): ``dt`` below is not defined in this function nor visibly
    at module level -- as written this raises NameError unless a global
    ``dt`` exists elsewhere; it probably should be derived from *t*. Confirm.
    '''
    # get data
    N = len(x)
    # indices of non-missing entries
    idx = [i.item() for i in np.argwhere(~np.isnan(x)).flatten()]
    x = x[idx]
    M = len(x)
    # third-difference operator used as the smoothness penalty D
    # D1 = _blocdiag(matrix([-1,1],(1,2), tc="d"), N) * (1/dt)
    # D2 = _blocdiag(matrix([1,-2,1],(1,3), tc="d"), N) * (1/dt**2)
    D3 = _blocdiag(matrix([-1,3,-3,1],(1,4), tc="d"), N) * (1/dt**3)
    if reg == "l2":
        lam2 = args
        DD = lam2 * D3.trans() * D3
        # sol: xhat = (I+delta D'D)^(-1)x
        I = spmatrix(1.0, range(N), range(N))
        H = I[idx,:]  # selection matrix picking the observed rows
        DD = lam2*D3.trans() * D3
        HH = H.trans() * H
        Q = 2*(HH/M+DD/N)
        p = -2*H.trans() * matrix(x)/M
        return Q, p, H, N, M
    else:
        lam2, lam1 = args
        DD = lam2 * D3.trans() * D3
        # define matrices for the l1 slack-variable formulation, z = [x; u; v]
        I = spmatrix(1.0, range(N), range(N))
        IM = spmatrix(1.0, range(M), range(M))
        O = spmatrix([], [], [], (N,N))
        OM = spmatrix([], [], [], (M,M))
        H = I[idx,:]
        HH = H.trans()*H
        Q = 2*sparse([[HH/M+DD/N,H/M,-H/M], # first column of Q
                      [H.trans()/M,IM/M, -H*H.trans()/M],
                      [-H.trans()/M,-H*H.trans()/M,IM/M]])
        p = 1/M * sparse([-2*H.trans()*matrix(x), -2*matrix(x)+lam1, 2*matrix(x)+lam1])
        G = sparse([[H*O,H*O],[-IM,OM],[OM,-IM]])  # enforces u >= 0, v >= 0
        h = spmatrix([], [], [], (2*M,1))
        return Q, p, H, G, h, N,M
def rectify_1d(car, args, axis):
    '''
    solve solve for ||y-x||_2^2 + \lam ||Dx||_2^2
    args: lam
    axis: "x" or "y"
    Returns the smoothed positions (first N entries of the QP solution).
    NOTE(review): reads car[axis + "_position"].values -- assumes pandas
    Series here, unlike the plain-dict documents used elsewhere; confirm.
    '''
    # get data and matrices
    x = car[axis + "_position"].values
    Q, p, H, N,M = _getQPMatrices(x, 0, args, reg="l2")
    sol=solvers.qp(P=Q, q=p)
    print(sol["status"])
    # extract result: the decision vector holds the N positions
    xhat = sol["x"][:N]
    return xhat
def rectify_1d_l1(car, args, axis):
    '''
    solve for ||y-Hx-e||_2^2 + \lam2 ||Dx||_2^2 + \lam1||e||_1
    convert to quadratic programming with linear inequality constraints
    handle sparse outliers in data
    rewrite l1 penalty to linear constraints https://math.stackexchange.com/questions/391796/how-to-expand-equation-inside-the-l2-norm
    args: (lam2, lam1); axis: "x" or "y"
    Returns the smoothed positions only (slacks are discarded).
    '''
    x = car[axis + "_position"].values
    Q, p, H, G, h, N,M = _getQPMatrices(x, 0, args, reg="l1")
    sol=solvers.qp(P=Q, q=matrix(p) , G=G, h=matrix(h))
    # decision vector is [x; u; v]: N positions then the two M-long slacks
    xhat = sol["x"][:N]
    u = sol["x"][N:N+M]
    v = sol["x"][N+M:]
    print(sol["status"])
    return xhat
def rectify_2d(car,args,reg = "l2"):
    '''
    rectify on x and y component independently (batch method)

    car: document with "x_position" / "y_position"
    args: (lamx, lamy) when reg="l2"; (lamx, lamy, lam1) when reg="l1"
    reg: "l2" for ridge smoothing, "l1" to additionally absorb sparse outliers
    Raises ValueError for any other *reg* (previously fell through to a
    NameError on xhat/yhat).
    '''
    if reg == "l2":
        lamx, lamy = args
        xhat = rectify_1d(car, lamx, "x")
        yhat = rectify_1d(car, lamy, "y")
    elif reg == "l1":
        lamx, lamy, lam1 = args
        xhat = rectify_1d_l1(car, (lamx, lam1), "x")
        yhat = rectify_1d_l1(car, (lamy, lam1), "y")
    else:
        raise ValueError("reg must be 'l2' or 'l1', got {!r}".format(reg))
    # write the smoothed coordinates back onto the document
    car['x_position'] = xhat
    car['y_position'] = yhat
    return car
# =================== RECEDING HORIZON RECTIFICATION =========================
def receding_horizon_1d(car, args, axis="x"):
    '''
    rolling horizon version of rectify_1d
    car: dict
    args: (lam2, PH, IH)
        PH: prediction horizon (frames optimized per window)
        IH: implementation horizon (frames committed per window)
    QP formulation with sparse matrix min ||y-x||_2^2 + \lam ||Dx||_2^2

    Fix: the solver was called with the undefined name ``lam``; the
    smoothing weight unpacked from *args* is ``lam2``. Also removed a dead
    pre-loop construction of the continuity-constraint matrix A, which was
    never used (it is rebuilt per window below).
    '''
    # TODO: compute matrices once
    # get data
    lam2, PH, IH = args
    x = car[axis+"_position"]
    n_total = len(x)
    # accumulate committed positions here
    xfinal = matrix([])
    n_win = max(0,(n_total-PH+IH)//IH)
    last = False
    cs = 3  # number of states pinned between consecutive windows
    for i in range(n_win+1):
        if i == n_win: # last window takes the remainder
            xx = x[i*IH:]
            last = True
        else:
            xx = x[i*IH: i*IH+PH]
        nn = len(xx)
        Q, p, H, N, M = _getQPMatrices(xx, 0, lam2, reg="l2")
        if i == 0:
            sol = solvers.qp(P=Q, q=p)
        else:
            # equality constraint: first cs states equal the previous
            # window's overlapping tail (state continuity)
            A = sparse([[spmatrix(1.0, range(cs), range(cs))], [spmatrix([], [], [], (cs,nn-cs))]])
            A = matrix(A, tc="d")
            b = matrix(x_prev)
            sol = solvers.qp(P=Q, q=p, A=A, b=b)
        xhat = sol["x"]
        if last:
            xfinal = matrix([xfinal, xhat])
        else:
            xfinal = matrix([xfinal, xhat[:IH]])
        # save the continuity states for the next window
        x_prev = xhat[IH:IH+cs]
    return xfinal
def receding_horizon_2d(car,args):
    '''
    Smooth x and y tracks independently with the receding-horizon rectifier.
    car: stitched fragments from data_q
    args: (lam2_x, lam2_y, PH, IH)
    TODO: parallelize x and y?
    '''
    # get data
    lam2_x, lam2_y, PH, IH = args
    xhat = receding_horizon_1d(car, (lam2_x, PH, IH), "x")
    yhat = receding_horizon_1d(car, (lam2_y, PH, IH), "y")
    car['x_position'] = xhat
    car['y_position'] = yhat
    return car
def receding_horizon_1d_l1(car, args, axis):
    '''
    rolling horizon version of rectify_1d_l1
    car: dict
    args: (lam1, lam2, PH, IH)
        PH: prediction horizon
        IH: implementation horizon

    Fixes:
    - _getQPMatrices(reg="l1") unpacks exactly (lam2, lam1); the full
      4-tuple *args* was being passed, which cannot unpack.
    - the decision vector is [x; u; v] (positions plus l1 slacks); the final
      window previously appended the whole vector -- including the slacks --
      to the output. Truncate to the first N entries.
    - q is passed as matrix(p) in both solver calls for consistency.
    '''
    # TODO: compute matrices once
    # get data
    lam1, lam2, PH, IH = args
    x = car[axis+"_position"]
    n_total = len(x)
    # accumulate committed positions here
    xfinal = matrix([])
    n_win = max(0,(n_total-PH+IH)//IH)
    last = False
    cs = 3  # number of states pinned between consecutive windows
    for i in range(n_win+1):
        if i == n_win: # last window takes the remainder
            xx = x[i*IH:]
            last = True
        else:
            xx = x[i*IH: i*IH+PH]
        Q, p, H, G, h, N, M = _getQPMatrices(xx, 0, (lam2, lam1), reg="l1")
        if i == 0:
            sol = solvers.qp(P=Q, q=matrix(p), G=G, h=matrix(h))
        else:
            # equality constraint: pin the first cs positions to the
            # previous window's overlapping tail (state continuity)
            A = sparse([[spmatrix(1.0, range(cs), range(cs))], [spmatrix([], [], [], (cs, N-cs + 2*M))]])
            A = matrix(A, tc="d")
            b = matrix(x_prev)
            sol = solvers.qp(P=Q, q=matrix(p), G=G, h=matrix(h), A=A, b=b)
        xhat = sol["x"][:N]  # keep positions only; drop slacks u, v
        if last:
            xfinal = matrix([xfinal, xhat])
        else:
            xfinal = matrix([xfinal, xhat[:IH]])
        # save the continuity states for the next window
        x_prev = xhat[IH:IH+cs]
    return xfinal
def receding_horizon_2d_l1(car, lam1_x, lam1_y, lam2_x, lam2_y, PH, IH):
    '''
    Smooth x and y tracks with the receding-horizon l1 rectifier.
    car: stitched fragments from data_q
    lam1_*: outlier (l1) weights; lam2_*: smoothness weights
    PH / IH: prediction / implementation horizons
    TODO: parallelize x and y?
    '''
    # get data
    xhat = receding_horizon_1d_l1(car, (lam1_x, lam2_x, PH, IH), "x")
    yhat = receding_horizon_1d_l1(car, (lam1_y, lam2_y, PH, IH), "y")
    car['x_position'] = xhat
    car['y_position'] = yhat
    return car
# =============== need to be moved =================
def nan_helper(y):
    """Locate NaNs in a 1-D array and provide an index converter for them.

    Input:
        - y, 1d numpy array with possible NaNs
    Output:
        - nans: boolean mask marking NaN positions
        - index: callable mapping a boolean mask to integer indices,
          convenient for np.interp-based gap filling

    Example:
        >>> # linear interpolation of NaNs
        >>> nans, x = nan_helper(y)
        >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    mask = np.isnan(y)

    def to_indices(flags):
        # convert a logical mask into the equivalent integer indices
        return flags.nonzero()[0]

    return mask, to_indices
def generate_1d(initial_state, highest_order_dynamics, dt, order):
    '''
    Forward-integrate 1-D vehicle states with first-order (Euler) dynamics:
        x(k+1) = x(k) + v(k) dt
        v(k+1) = v(k) + a(k) dt        (order >= 2)
        a(k+1) = a(k) + j(k) dt        (order == 3)
    initial_state: [x0, v0, a0] when order == 3, [x0, v0] when order == 2
    highest_order_dynamics: length-N array (jerk if order==3, accel if order==2)
    dt: time step
    order: highest order of derivative. 2: acceleration. 3: jerk
    return: x, v, a, j  (j is np.nan when order == 2)
    '''
    # TODO: vectorize this function
    n = len(highest_order_dynamics)
    x = np.zeros(n)
    v = np.zeros(n)
    if order == 3:
        j = highest_order_dynamics
        a = np.zeros(n)
        x[0], v[0], a[0] = initial_state
        for k in range(n - 1):
            a[k+1] = a[k] + j[k]*dt
            v[k+1] = v[k] + a[k]*dt
            x[k+1] = x[k] + v[k]*dt
    elif order == 2:
        j = np.nan
        a = highest_order_dynamics
        x[0], v[0] = initial_state
        for k in range(n - 1):
            v[k+1] = v[k] + a[k]*dt
            x[k+1] = x[k] + v[k]*dt
    return x, v, a, j
def generate_2d(initial_state, highest_order_dynamics, theta, dt, order):
    '''
    Forward-integrate 2-D vehicle states with simple steering dynamics:
        a(k+1) = a(k) + j(k) dt        (order == 3 only)
        v(k+1) = v(k) + a(k) dt
        vx(k)  = v(k) cos(theta(k))
        vy(k)  = v(k) sin(theta(k))
        x(k+1) = x(k) + vx(k) dt
        y(k+1) = y(k) + vy(k) dt
    initial_state: [x0, y0, v0, a0] (order 3) or [x0, y0, v0] (order 2)
    highest_order_dynamics: Nx1 array (jerk or acceleration)
    theta: Nx1 heading array, radians relative to the +x axis
    dt: time step
    order: highest order of derivative. 2: acceleration. 3: jerk
    return: x, y, theta, v, a, j  (j is np.nan when order == 2)
    '''
    # TODO: vectorize this function
    n = len(highest_order_dynamics)
    v = np.zeros(n)
    x = np.zeros(n)
    y = np.zeros(n)
    if order == 3:
        assert len(initial_state) == 4
        j = highest_order_dynamics
        a = np.zeros(n)
        x[0], y[0], v[0], a[0] = initial_state
        # integrate acceleration and speed from the jerk input
        for k in range(n - 1):
            a[k+1] = a[k] + j[k]*dt
            v[k+1] = v[k] + a[k]*dt
    elif order == 2:
        assert len(initial_state) == 3
        j = np.nan
        a = highest_order_dynamics
        x[0], y[0], v[0] = initial_state
        for k in range(n - 1):
            v[k+1] = v[k] + a[k]*dt
    # project speed onto the heading, then integrate the positions
    vx = v * np.cos(theta)
    vy = v * np.sin(theta)
    for k in range(n - 1):
        x[k+1] = x[k] + vx[k]*dt
        y[k+1] = y[k] + vy[k]*dt
    return x, y, theta, v, a, j
def decompose_2d(car, write_to_df = False):
    '''
    the opposite of generate_2d: differentiate positions to recover
    vx,vy,ax,ay,jx,jy from the GT box measurements
    car: DataFrame with either bbr/bbl corner columns or x/y columns
    write_to_df: if True, write derivatives back onto *car* and return it;
                 otherwise return the tuple (vx,vy,ax,ay,jx,jy)
    NOTE(review): ``dt`` is not defined in this function nor visibly at
    module level -- raises NameError unless a global ``dt`` exists
    elsewhere in the file. Confirm.
    '''
    try:
        # back-center position: midpoint of the two back corners
        x = (car["bbr_x"].values + car["bbl_x"].values)/2
        y = (car["bbr_y"].values + car["bbl_y"].values)/2
    except:
        # fall back to precomputed back-center columns
        x = car.x.values
        y = car.y.values
    # successive finite differences, NaN-padded to keep length N
    vx = np.append(np.diff(x)/dt, np.nan)
    vy = np.append(np.diff(y)/dt, np.nan)
    ax = np.append(np.diff(vx)/dt, np.nan)
    ay = np.append(np.diff(vy)/dt, np.nan)
    jx = np.append(np.diff(ax)/dt, np.nan)
    jy = np.append(np.diff(ay)/dt, np.nan)
    if write_to_df:
        car.loc[:,"speed_x"] = vx
        car.loc[:,"speed_y"] = vy
        car.loc[:,"acceleration_x"] = ax
        car.loc[:,"acceleration_y"] = ay
        car.loc[:,"jerk_x"] = jx
        car.loc[:,"jerk_y"] = jy
        return car
    else:
        return vx,vy,ax,ay,jx,jy
def rectify_sequential(df, args):
    """Rectify every trajectory in *df* sequentially, grouped by vehicle ID.
    NOTE(review): delegates to rectify_single_car, whose internal call does
    not match receding_horizon_2d's (car, args) signature -- confirm the
    intended arguments before relying on this path.
    """
    print("{} total trajectories, {} total measurements".format(df['ID'].nunique(), len(df)))
    start = time.time()
    df = df.groupby('ID').apply(rectify_single_car, args=args).reset_index(drop=True)
    end = time.time()
    print('total time rectify_sequential: ',end-start)
    return df
def generate_box(w, l, x, y, theta):
    '''
    Build the four bottom corners 'bbr_x','bbr_y','fbr_x','fbr_y','fbl_x',
    'fbl_y','bbl_x','bbl_y' of a w-by-l box from its back-center track.
    - x: Nx1 array of backcenter x
    - y: Nx1 array of backcenter y
    - theta: Nx1 array of angle relative to positive x direction (not steering)
    - w: width
    - l: length
    Returns an N x 8 array stacking [xa, ya, xb, yb, xc, yc, xd, yd].
    '''
    sin_t = sin(theta)
    cos_t = cos(theta)
    # corner a: half a width to the right of the back center
    xa = x + w/2*sin_t
    ya = y - w/2*cos_t
    # corner b: one length ahead of corner a
    xb = xa + l*cos_t
    yb = ya + l*sin_t
    # corner c: one width across from corner b
    xc = xb - w*sin_t
    yc = yb + w*cos_t
    # corner d: one width across from corner a
    xd = xa - w*sin_t
    yd = ya + w*cos_t
    return np.stack([xa, ya, xb, yb, xc, yc, xd, yd], axis=-1)
def rectify_single_car(car, args):
    '''
    Rectify one vehicle document.
    car: a document (dict)
    NOTE(review): receding_horizon_2d is defined as (car, args); calling it
    with (car, width, length, args) raises TypeError as written -- the
    intended call signature needs confirming.
    '''
    width = car['width']
    length = car['length']
    car = receding_horizon_2d(car, width, length, args)
    return car
def receding_horizon_1d_original(df, args, axis="x"):
    '''
    rolling horizon version of rectify_1d
    car: df
    args: (lam, PH, IH)
        PH: prediction horizon
        IH: implementation horizon
    QP formulation with sparse matrix min ||y-x||_2^2 + \lam ||Dx||_2^2
    Returns (xfinal, vfinal, afinal, jfinal): smoothed positions plus their
    1st/2nd/3rd differences, NaN-padded at the tail to equal lengths.
    Returns None when fewer than 2 valid measurements exist in the first PH.
    NOTE(review): ``dt`` is not defined in this view of the file -- the
    D1-D3 operators below raise NameError unless a module-level ``dt``
    exists elsewhere. Confirm.
    '''
    # get data
    lam, PH, IH = args
    x = df[axis].values
    n_total = len(x)
    # Define constraints for the first PH
    idx = [i.item() for i in np.argwhere(~np.isnan(x[:PH])).flatten()]
    if len(idx) < 2: # not enough measurements
        print('not enough measurements in receding_horizon_1d')
        return
    xx = x[:PH]
    xx = xx[idx]
    # differentiation operators: 1st/2nd/3rd difference scaled by 1/dt^k
    D1 = _blocdiag(matrix([-1,1],(1,2), tc="d"), PH) * (1/dt)
    D2 = _blocdiag(matrix([1,-2,1],(1,3), tc="d"), PH) * (1/dt**2)
    D3 = _blocdiag(matrix([-1,3,-3,1],(1,4), tc="d"), PH) * (1/dt**3)
    # sol: xhat = (I+delta D'D)^(-1)x
    I = spmatrix(1.0, range(PH), range(PH))
    H = I[idx,:]  # selection matrix picking the observed rows
    M = len(idx)
    DD = lam*D3.trans() * D3
    HH = H.trans() * H
    # QP formulation with sparse matrix min ||y-x||_2^2 + \lam ||Dx||_2^2
    Q = 2*(HH+DD)
    p = -2*H.trans() * matrix(xx)
    sol=solvers.qp(P=Q, q=p)
    # additional equality constraint: state continuity
    A = sparse([[spmatrix(1.0, range(4), range(4))], [spmatrix([], [], [], (4,PH-4))]])
    A = matrix(A, tc="d")
    # accumulators for the committed answers
    xfinal = matrix([])
    vfinal = matrix([])
    afinal = matrix([])
    jfinal = matrix([])
    n_win = max(0,(n_total-PH+IH)//IH)
    last = False
    cs = 3 # new constraint steps
    for i in range(n_win+1):
        # print(i,'/',n_total, flush=True)
        if i == n_win: # last window takes the remainder
            xx =x[i*IH:]
            last = True
        else:
            xx = x[i*IH: i*IH+PH]
        nn = len(xx)
        idx = [i.item() for i in np.argwhere(~np.isnan(xx)).flatten()]
        xx = xx[idx]
        # shrink the cached operators to this window's length
        I = I[:nn, :nn]
        H = I[idx,:]
        D1 = D1[:nn-1 ,:nn]
        D2 = D2[:nn-2 ,:nn]
        D3 = D3[:nn-3 ,:nn]
        DD = lam*D3.trans() * D3
        HH = H.trans() * H
        Q = 2*(HH+DD)
        p = -2*H.trans() * matrix(xx)
        if i == 0:
            sol=solvers.qp(P=Q, q=p)
        else: # if x_prev exists - not first window: pin first cs states
            A = sparse([[spmatrix(1.0, range(cs), range(cs))], [spmatrix([], [], [], (cs,nn-cs))]])
            A = matrix(A, tc="d")
            b = matrix(x_prev)
            sol=solvers.qp(P=Q, q=p, A = A, b=b)
        xhat = sol["x"]
        # derived kinematics from the smoothed positions
        vhat = D1*xhat
        ahat = D2*xhat
        jhat = D3*xhat
        if last:
            # NaN-pad the shorter difference vectors to the full length
            xfinal = matrix([xfinal, xhat])
            vfinal = matrix([vfinal, vhat, np.nan])
            afinal = matrix([afinal, ahat, matrix([np.nan, np.nan])])
            jfinal = matrix([jfinal, jhat, matrix([np.nan, np.nan, np.nan])])
        else:
            xfinal = matrix([xfinal, xhat[:IH]])
            vfinal = matrix([vfinal, vhat[:IH]])
            afinal = matrix([afinal, ahat[:IH]])
            jfinal = matrix([jfinal, jhat[:IH]])
        # save for the next loop
        x_prev = xhat[IH:IH+cs]
    return xfinal, vfinal, afinal, jfinal
# ===================== non-convex version =======================
def box_fitting(car, width, length):
    '''
    Fit fixed-dimension, zero-steering rectangles to the raw box
    measurements by optimizing only the two diagonal corners (fbr, bbl)
    per frame; this reduces the problem to 2-D point movement.

    car: DataFrame with the raw box columns and a 'direction' column
    width, length: box dimensions to enforce
    Returns *car* with best-fit back-center 'x','y' and reconstructed boxes.

    Fix: scipy.optimize.minimize was used without ever being imported
    (NameError at runtime); imported locally here. Also renamed the local
    ``dir`` which shadowed the builtin.
    TODO: test with missing data
    TODO: consider direction
    '''
    from scipy.optimize import minimize  # was previously an unresolved name

    print("in box_fitting")
    pts = ['bbr_x','bbr_y', 'fbr_x','fbr_y','fbl_x','fbl_y','bbl_x', 'bbl_y']
    Y = np.array(car[pts])
    N = len(Y)
    direction = car.direction.values[0]  # travel direction sign

    # Objective: squared distance between the rectangle implied by the two
    # diagonal corners and all 8 measured coordinates.
    def sos2(corners, x_data):
        fbr_x, fbr_y, bbl_x, bbl_y = corners
        rect = np.array([bbl_x, fbr_y, fbr_x, fbr_y, fbr_x, bbl_y, bbl_x, bbl_y])
        return LA.norm(rect - x_data, 2)**2

    # Equality constraint: the diagonal spans exactly (length, -width),
    # mirrored by the travel direction.
    A = np.array([[1,0,-1,0],
                  [0,1,0,-1]])
    b = np.sign(direction) * np.array([length, -width]).T
    eq_cons = {'type': 'eq',
               'fun': lambda c: np.dot(A, c) - b}

    # Solve frame by frame; frames with NaN corners stay NaN.
    x_opt = np.ones(N)*np.nan
    y_opt = np.ones(N)*np.nan
    Yre = np.ones(Y.shape) * np.nan
    for i, X_data in enumerate(Y):
        X = X_data[[2,3,6,7]]  # fbr_x, fbr_y, bbl_x, bbl_y
        if ~np.isnan(np.sum(X)):
            res = minimize(sos2, X, (X_data), method='SLSQP',
                           constraints=[eq_cons], options={'disp': False})
            fbr_x, fbr_y, bbl_x, bbl_y = res.x
            x_opt[i] = bbl_x
            y_opt[i] = (fbr_y + bbl_y)/2  # back-center y
            Yre[i] = np.array([bbl_x, fbr_y, fbr_x, fbr_y, fbr_x, bbl_y, bbl_x, bbl_y])
    car.loc[:,"x"] = x_opt
    car.loc[:,"y"] = y_opt
    car.loc[:,pts] = Yre
    return car
def obj_1d(X, x, order, N, dt, notNan, lam):
    """Scalar cost for 1-D trajectory rectification.
    X: stacked decision variables [xhat (N), vhat (N-1), ..., highest order]
    x: position measurements (N,), may contain NaN at masked-out steps
    order: highest derivative order carried in X (2: accel, 3: jerk)
    N: number of timesteps
    dt: timestep (unused here; the dynamics live in the linear constraints)
    notNan: boolean mask of timesteps that have measurements
    lam: weight trading data fidelity against dynamics smoothness
    Return: lam * fit_term + (1 - lam) * smoothness_term
    """
    positions = X[:N]
    # The highest-order dynamics occupy the tail of X; the offset accounts
    # for each derivative level being one element shorter than the last.
    tail_offset = int((1 + order - 1) * (order - 1) / 2)
    dynamics = X[order*N - tail_offset:]
    rescale = (30)**(order)
    # restrict the fit term to timesteps with measurements
    fitted = positions[notNan]
    measured = x[notNan]
    fit = LA.norm(measured - fitted, 2)**2 * rescale / np.count_nonzero(notNan)
    smooth = LA.norm(dynamics, 2)**2 / len(dynamics)
    return lam*fit + (1 - lam)*smooth
def const_1d(N, dt, order):
    """Linear-dynamics equality-constraint matrix for 1-D rectification.
    Encodes x(k+1) - x(k) - dt*v(k) = 0 (and likewise at each higher
    derivative level) over the stacked variable X = [x, v, a, ...] whose
    pieces shrink by one element per level.
    N: number of timesteps of the position variable
    dt: timestep
    order: highest derivative order (2: accel, 3: jerk)
    Return: numpy array A such that A @ X = 0; usable with
    scipy.optimize.LinearConstraint (lb <= A @ X <= ub, 'trust-constr').
    """
    offset = int((1 + order)*order/2)
    n_cols = (order + 1)*N - offset
    n_rows = order*N - offset
    A = np.zeros((n_rows, n_cols))
    for level in range(order):
        off_pre = int((1 + level)*level/2)
        off_post = int((2 + level)*(level + 1)/2)
        first_row = level*N - off_pre
        last_row = (level + 1)*N - off_post
        stride = N - level  # column gap to the next-derivative variable
        for r in range(first_row, last_row):
            A[r][r + level] = -1
            A[r][r + level + 1] = 1
            A[r][r + level + stride] = -dt
    return A
def loss(Yre, Y1, norm='l21'):
    '''
    Discrepancy between reconstructed boxes Yre and measured boxes Y1,
    computed only over rows of Y1 that contain no NaNs.
    norm: 'l21' -> mean per-row euclidean norm
          'l2'  -> Frobenius norm divided by the row count
          'xy'  -> x/y-weighted Frobenius norm divided by the row count
    Returns 0 when no valid rows remain.
    '''
    valid = ~np.isnan(np.sum(Y1, axis=-1))
    diff = np.abs(Y1[valid, :] - Yre[valid, :])
    n_rows = len(diff)
    if n_rows == 0:
        return 0
    if norm == 'l21':
        return np.nanmean(LA.norm(diff, axis=1))
    if norm == 'l2':
        return LA.norm(diff, 'fro') / n_rows
    if norm == 'xy':  # weighted combination of x and y errors
        err_x = np.abs(diff[:, [0, 2, 4, 6]])
        err_y = np.abs(diff[:, [1, 3, 5, 7]])
        alpha = 0.3
        return LA.norm(alpha*err_x + (1 - alpha)*err_y, 'fro') / n_rows
def get_costs(Yre, Y1, x, y, v, a, theta, norm):
    '''
    Compute the five raw cost terms -- fit, acceleration, jerk, heading
    angle, angular rate -- used to normalize the lambda weights.
    Returns the tuple (c1, c2, c3, c4, c5).
    '''
    n = len(a)
    # unit normalizers kept at 1: the raw 2-norms are returned directly
    norm1, norm2, norm3, norm4, norm5 = 1, 1, 1, 1, 1
    fit = loss(Yre, Y1, norm) / norm1
    accel = LA.norm(a, 2) / n / 30 / norm2
    jerk = LA.norm(np.diff(a), 2) / n / 30 / norm3
    angle = LA.norm(sin(theta), 2) / n / norm4
    omega = LA.norm(np.diff(theta), 2) / n / norm5
    return fit, accel, jerk, angle, omega
def obj1(X, Y1,N,dt,notNan, lam1,lam2,lam3,lam4,lam5):
    """The cost function for whole-track box fitting.
    X = [v (N), theta (N), x0, y0, w, l]^T  (per the unpacking below)
    penalize data fit, acceleration, jerk, heading angle and angular rate
    pretty accurate and faster than previous formulation
    NOTE(review): generate() is not defined in this view of the file --
    assumed to rebuild boxes from (w, l, x0, y0, theta, v); confirm.
    """
    nvalid = np.count_nonzero(notNan)
    # unpack decision variables
    v = X[:N]
    theta = X[N:2*N]
    x0,y0,w,l = X[2*N:]
    Yre,x,y,a = generate(w,l,x0, y0, theta,v, outputall=True)
    Yre = Yre[notNan,:]
    # data-fit term (min perturbation)
    c1 = lam1 * loss(Yre, Y1, 'l21')
    # regularize acceleration # not the real a or j, multiply a constant dt
    c2 = lam2*LA.norm(a,2)/nvalid/30
    # regularize jerk
    j = np.diff(a)/dt
    c3 = lam3*LA.norm(j,2)/nvalid /900
    # regularize angle
    st = sin(theta)
    c4 = lam4*LA.norm(st,2)/nvalid
    # regularize angular velocity
    o = np.diff(theta)/dt
    c5 = lam5*LA.norm(o,2)/nvalid/30
    return c1+c2+c3+c4+c5
def unpack1(res, N, dt):
    """Unpack an optimizer result into trajectory quantities.
    res: optimizer result whose .x stacks [v (N), theta (N), x0, y0, w, l]
    Returns (Yre, x, y, v, a, theta, w, l), with boxes and kinematics
    rebuilt via generate().
    (Removed two dead np.zeros allocations for x and y that were
    immediately overwritten by generate()'s return values.)
    NOTE(review): generate() is defined elsewhere in this module/file.
    """
    v = res.x[:N]
    theta = res.x[N:2*N]
    x0, y0, w, l = res.x[2*N:]
    Yre, x, y, a = generate(w, l, x0, y0, theta, v, outputall=True)
    return Yre, x, y, v, a, theta, w, l
def rectify_single_camera(df, args):
    '''
    Rectify a single track from one camera view by fitting per-frame speed
    and heading plus global (x0, y0, w, l) with basin-hopping on obj1.
    df: a single track in one camera view
    args: (lam1..lam5, niter) -- objective weights and basinhopping iterations
    Returns the rectified df, or None when too few valid measurements exist.
    NOTE(review): ``pts``, ``generate`` and ``basinhopping`` are not defined
    in this view of the file -- assumed module-level names (box-corner column
    list, box generator, scipy.optimize.basinhopping); confirm.
    '''
    lam1, lam2, lam3,lam4,lam5,niter = args
    timestamps = df.Timestamp.values
    dt = np.diff(timestamps)
    sign = df["direction"].iloc[0]
    # get bottom 4 points coordinates
    Y1 = np.array(df[pts])
    N = len(Y1)
    notNan = ~np.isnan(np.sum(Y1,axis=-1))
    Y1 = Y1[notNan,:]
    if (len(Y1) <= 3):
        print('Not enough valid measurements: ', df['ID'].iloc[0])
        # df.loc[:,pts] = np.nan
        return None
    # reorder Y1 to deal with backward traveling measurements
    # new_order = np.argsort(np.sum(Y1[:, [0,2,4,6]],axis=1))[::int(sign)]
    # Y1 = Y1[new_order,:]
    first_valid = np.where(notNan==True)[0][0]
    # rough speed estimate from the x-spread of the right corners over time
    temp = df[~df["bbr_x"].isna()]
    v_bbr = (max(temp.bbr_x.values)-min(temp.bbr_x.values))/(max(temp.Timestamp.values)-min(temp.Timestamp.values))
    v_fbr = (max(temp.fbr_x.values)-min(temp.fbr_x.values))/(max(temp.Timestamp.values)-min(temp.Timestamp.values))
    # avgv = max(min(v_bbr,50), min(v_fbr,50))
    avgv = (v_bbr+v_fbr)/2
    # print(avgv)
    # initial guess: constant speed, start position back-extrapolated at 30 fps
    v0 = np.array([np.abs(avgv)]*N)
    x0 = (Y1[0,0]+Y1[0,6])/2- sign*avgv*first_valid*1/30
    y0 = (Y1[0,1]+Y1[0,7])/2
    dy = Y1[-1,1]-Y1[0,1]
    dx = Y1[-1,0]-Y1[0,0]
    theta0 = np.ones((N))*np.arccos(sign) # parallel to lane
    # theta0 = np.ones((N))*np.arctan2(dy,dx) # average angle
    # initial dimensions from mean corner distances (no perfect box exists)
    w0 = np.nanmean(np.sqrt((Y1[:,1]-Y1[:,7])**2+(Y1[:,0]-Y1[:,6])**2))
    l0 = np.nanmean(np.sqrt((Y1[:,2]-Y1[:,0])**2+(Y1[:,1]-Y1[:,3])**2))
    X0 = np.concatenate((v0.T, theta0.T, \
                         [x0,y0,w0,l0]),axis=-1)
    # bounds: speed in [0,50], heading pinned to the lane direction, sane dims
    bnds = [(0,50) for i in range(0,N)]+\
           [(np.arccos(sign),np.arccos(sign)) for i in range(N)]+\
           [(-np.inf,np.inf),(0,np.inf),(1,4),(2,np.inf)]
    # [(-np.pi/8+np.arccos(sign),np.pi/8+np.arccos(sign)) for i in range(N)]+\
    Y0 = generate(w0,l0,x0, y0, theta0,v0)
    diff = Y1-Y0[notNan,:]
    c1max = np.nanmean(LA.norm(diff,axis=1))
    c1max = max(c1max, 1e-4)
    # SOLVE FOR MAX C2-C5 BY SETTING LAM2-5 = 0
    lams = (100,0,0,0,0)
    minimizer_kwargs = {"method":"L-BFGS-B", "args":(Y1,N,dt,notNan,*lams),'bounds':bnds,'options':{'disp': False}}
    res = basinhopping(obj1, X0, minimizer_kwargs=minimizer_kwargs,niter=0)
    print('\n')
    print('Initilization: ',loss(Y0[notNan,:], Y1, norm='l2'))
    # extract results of the warm-up solve
    Yre, x,y,v,a,theta,w,l = unpack1(res,N,dt)
    Yre = Yre[notNan,:]
    _,c2max,c3max,c4max,c5max = get_costs(Yre, Y1, x,y,v,a,theta,'l21')
    c2max,c3max,c4max,c5max = max(c2max, 1e-4), max(c3max, 1e-4), max(c4max, 1e-4), max(c5max, 1e-4)
    # SOLVE AGAIN - WITH NORMALIZED OBJECTIVES
    lams = (lam1/c1max,lam2/c2max,lam3/c3max,lam4/c4max,lam5/c5max)
    minimizer_kwargs = {"method":"L-BFGS-B", "args":(Y1,N,dt,notNan,*lams),'bounds':bnds,'options':{'disp': False}}
    res = basinhopping(obj1, X0, minimizer_kwargs=minimizer_kwargs,niter=niter)
    Yre, x,y,v,a,theta,w,l = unpack1(res,N,dt)
    print('Final: ',loss(Yre[notNan,:], Y1, norm='l2'))
    # write the rectified quantities back onto the track
    df.loc[:,pts] = Yre
    df.loc[:,'acceleration'] = a
    df.loc[:,'speed'] = v
    df.loc[:,'x'] = x
    df.loc[:,'y'] = y
    df.loc[:,'theta'] = theta
    df.loc[:,'width'] = w
    df.loc[:,'length'] = l
    return df
def applyParallel(dfGrouped, func, args=None):
    """Apply ``func`` to every group of ``dfGrouped`` in parallel.

    ``args``, when given, is bound to ``func`` as the keyword argument
    ``args`` via ``functools.partial``. The per-group results are
    concatenated into a single DataFrame.
    """
    groups = [group for _, group in dfGrouped]
    # bind the extra arguments once, outside the worker pool
    worker = func if args is None else partial(func, args=args)
    with Pool(cpu_count()) as pool:
        results = list(tqdm(pool.imap(worker, groups), total=len(dfGrouped.groups)))
    return pd.concat(results)
def rectify(df):
    """Rectify every tracked object in ``df`` by solving obj1 per object.

    Tracks shorter than two frames are dropped before optimisation.
    """
    print('Rectifying...')
    # keep only tracks long enough to optimise over
    long_enough = df.groupby("ID").filter(lambda g: len(g) >= 2)
    tqdm.pandas()
    # lams = (1,0.2,0.2,0.05,0.02) # lambdas
    # weights: 1 data perturb, 2 acceleration, 3 jerk, 4 theta, 5 omega
    lams = (1,0,0,0.1,0.1,0)
    rectified = applyParallel(long_enough.groupby("ID"), rectify_single_camera, args=lams)
    # df = df.groupby('ID').apply(rectify_single_camera, args=lams).reset_index(drop=True)
    return rectified.reset_index(drop=True)
def rectify_receding_horizon(df):
    """Receding-horizon variant of :func:`rectify`.

    Currently only filters out tracks shorter than two frames; the
    per-object optimisation call is left disabled below.
    """
    filtered = df.groupby("ID").filter(lambda g: len(g) >= 2)
    tqdm.pandas()
    # df = df.groupby("ID").progress_apply(receding_horizon_opt).reset_index(drop=True)
    return filtered
def receding_horizon_opt(car):
    '''
    Re-write the batch optimization (obj1 and obj2) as a mini-batch
    (receding-horizon) optimization to save computational time.

    :param car: DataFrame of a single vehicle track; must contain the eight
        bottom-corner columns, 'Timestamp', 'direction' and 'camera'.
    :return: the same DataFrame with rectified corners plus derived columns
        (acceleration, speed, x, y, theta, width, length).

    n: number of frames, assuming 30 fps
    PH: prediction horizon (frames optimized per window)
    IH: implementation horizon (frames kept from each window)
    '''
    w,l = estimate_dimensions(car) # use some data to estimate vehicle dimensions
    # print('estimated w:',w,'l:',l)
    # optimization parameters
    lam1 = 1 # modification of measurement
    lam2 = 1 # acceleration
    lam3 = 0 # jerk
    lam4 = 50 # theta
    lam5 = 1 # omega
    PH = 200 # optimize over Prediction Horizon frames
    IH = 100 # implementation horizon
    sign = car['direction'].iloc[0]
    timestamps = car.Timestamp.values
    pts = ['bbr_x','bbr_y', 'fbr_x','fbr_y','fbl_x','fbl_y','bbl_x', 'bbl_y']
    Y = np.array(car[pts])
    n = len(Y)
    # accumulators for the kept (implemented) portion of each window
    Yre = np.empty((0,8))
    a_arr = np.empty((0,0))
    x_arr = np.empty((0,0))
    y_arr = np.empty((0,0))
    v_arr = np.empty((0,0))
    theta_arr = np.empty((0,0))
    # slide a PH-frame window forward IH frames at a time; since PH = 2*IH
    # the final window always reaches the end of the track
    for i in range(0,n-IH,IH):
        # print(i,'/',n, flush=True)
        Y1 = Y[i:min(i+PH,n),:]
        N = len(Y1)
        # frames where all eight corner coordinates are present
        notNan = ~np.isnan(np.sum(Y1,axis=-1))
        # if (i>0) and (np.count_nonzero(notNan)<4): # TODO: does not work if first PH has not enough measurements!
        # if not enough measurement for this PH, simply use the last round of answers
        # Yre = np.vstack([Yre,Yre1[:N if i+PH>=n else PH-IH,:]])
        # a_arr = np.append(a_arr,a[:N if i+PH>=n else PH-IH:])
        # x_arr = np.append(x_arr,x[:N if i+PH>=n else PH-IH:])
        # y_arr = np.append(y_arr,y[:N if i+PH>=n else PH-IH:])
        # v_arr = np.append(v_arr,v[:N if i+PH>=n else PH-IH:])
        # theta_arr = np.append(theta_arr,theta[:N if i+PH>=n else PH-IH:])
        # continue
        Y1 = Y1[notNan,:]
        ts = timestamps[i:min(i+PH,n)]
        dt = np.diff(ts)
        a0 = np.zeros((N))
        # warm-start speed/position from the previous window when available
        try:
            v0 = v_arr[-1]
        except:
            v0 =(Y1[-1,0]-Y1[0,0])/(ts[notNan][-1]-ts[notNan][0])
        try:
            x0 = x_arr[-1]
            y0 = y_arr[-1]
        except:
            x0 = (Y1[0,0]+Y1[0,6])/2
            y0 = (Y1[0,1]+Y1[0,7])/2
        v0 = np.abs(v0)
        # heading parallel to the lane: 0 for sign=+1, pi for sign=-1
        theta0 = np.ones((N))*np.arccos(sign)
        X0 = np.concatenate((a0.T, theta0.T, \
                             [v0,x0,y0]),axis=-1)
        # bounds: per-frame acceleration, per-frame heading, then v0/x0/y0
        if sign>0:
            bnds = [(-5,5) for ii in range(0,N)]+\
                [(-np.pi/8,np.pi/8) for ii in range(N)]+\
                [(0,40),(-np.inf,np.inf),(0,np.inf)]
        else:
            bnds = [(-5,5) for ii in range(0,N)]+\
                [(-np.pi/8+np.pi,np.pi/8+np.pi) for ii in range(N)]+\
                [(0,40),(-np.inf,np.inf),(0,np.inf)]
        res = minimize(obj2, X0, (Y1,N,dt,notNan,w,l,lam1,lam2,lam3,lam4,lam5), method = 'L-BFGS-B',
                        bounds=bnds, options={'disp': False,'maxiter':100000})#
        # extract results
        Yre1, x,y,v,a,theta,omega = unpack2(res,N,dt,w,l)
        # keep only IH frames per window, except the final window which
        # keeps everything up to the end of the track
        Yre = np.vstack([Yre,Yre1[:N if i+PH>=n else IH,:]])
        a_arr = np.append(a_arr,a[:N if i+PH>=n else IH])
        x_arr = np.append(x_arr,x[:N if i+PH>=n else IH])
        y_arr = np.append(y_arr,y[:N if i+PH>=n else IH])
        v_arr = np.append(v_arr,v[:N if i+PH>=n else IH])
        theta_arr = np.append(theta_arr,theta[:N if i+PH>=n else IH])
    # write into df
    # NOTE(review): assumes the accumulated arrays cover exactly n frames;
    # holds for PH=200/IH=100 but should be confirmed if those change
    car.loc[:,pts] = Yre
    car.loc[:,'acceleration'] = a_arr
    car.loc[:,'speed'] = v_arr
    car.loc[:,'x'] = x_arr
    car.loc[:,'y'] = y_arr
    car.loc[:,'theta'] = theta_arr
    car.loc[:,'width'] = w
    car.loc[:,'length'] = l
    return car
    # return Yre,a_arr,x_arr,v_arr,theta_arr
# return Yre,a_arr,x_arr,v_arr,theta_arr
def estimate_dimensions(car):
    """Estimate vehicle width and length by solving the box-fitting
    optimization on frames seen by cameras p1c3/p1c4 only.

    :param car: DataFrame of a single vehicle track.
    :return: (w, l) estimated width and length.
    """
    # optimization parameters
    car = car[(car['camera']=='p1c3') | (car['camera']=='p1c4')]
    # TODO: what to do if car has no measurements?
    lam1 = 1 # modification of measurement
    lam2 = 1 # acceleration
    lam3 = 0 # jerk
    lam4 = 50 # theta
    lam5 = 1 # omega
    ts = car.Timestamp.values
    Y1 = np.array(car[['bbr_x','bbr_y', 'fbr_x','fbr_y','fbl_x','fbl_y','bbl_x', 'bbl_y']])
    N = len(Y1)
    # frames where all eight corner coordinates are present
    notNan = ~np.isnan(np.sum(Y1,axis=-1))
    Y1 = Y1[notNan,:]
    dt = np.diff(ts)
    a0 = np.zeros((N))
    sign = car['direction'].iloc[0]
    # initial speed from net x displacement over the valid time span
    v0 = (Y1[-1,0]-Y1[0,0])/(ts[notNan][-1]-ts[notNan][0])
    v0 = np.abs(v0)
    # heading parallel to the lane: 0 for sign=+1, pi for sign=-1
    theta0 = np.ones((N))*np.arccos(sign)
    x0 = (Y1[0,0]+Y1[0,6])/2
    y0 = (Y1[0,1]+Y1[0,7])/2
    # initial width/length from mean corner separations
    X0 = np.concatenate((a0.T, theta0.T, \
                         [v0,x0,y0,np.nanmean(np.abs(Y1[:,1]-Y1[:,7])),\
                          np.nanmean(np.abs(Y1[:,0]-Y1[:,2]))]),axis=-1)
    if sign>0:
        bnds = [(-5,5) for ii in range(0,N)]+\
            [(-np.pi/8,np.pi/8) for ii in range(N)]+\
            [(0,40),(-np.inf,np.inf),(0,np.inf),(1,2.59),(2,np.inf)]
    else:
        bnds = [(-5,5) for ii in range(0,N)]+\
            [(-np.pi/8+np.pi,np.pi/8+np.pi) for ii in range(N)]+\
            [(0,40),(-np.inf,np.inf),(0,np.inf),(1,2.59),(2,np.inf)]
    # NOTE(review): here obj1/unpack1 use an acceleration-based packing with
    # 9 unpacked values, while the rectify path unpacks 8 -- confirm the
    # intended obj1/unpack1 signatures are consistent
    res = minimize(obj1, X0, (Y1,N,dt,notNan,lam1,lam2,lam3,lam4,lam5), method = 'L-BFGS-B',
                    bounds=bnds, options={'disp': False,'maxiter':100000})#
    # extract results
    Yre, x,y,v,a,theta,omega,w,l = unpack1(res,N,dt)
    return w,l
def generate(w,l, x0, y0, theta,v,outputall=False):
    """Roll out constant-velocity dynamics at 30 fps and return the four
    bottom corners of the vehicle box at every frame.

    w, l      : vehicle width and length
    x0, y0    : initial position of the reference corner axis
    theta, v  : per-frame heading and speed arrays (same length)
    outputall : when True also return the integrated x, y and acceleration
    """
    n_frames = len(theta)
    step = [1/30]*n_frames
    vel_x = v*cos(theta)
    vel_y = v*sin(theta)
    # forward-difference acceleration, padded so len(accel) == n_frames
    accel = np.diff(v)
    accel = np.append(accel, accel[-1])
    accel = accel/step
    # explicit Euler integration of the reference position
    x = np.zeros(n_frames)
    y = np.zeros(n_frames)
    x[0], y[0] = x0, y0
    for k in range(n_frames - 1):
        x[k+1] = x[k] + vel_x[k]*step[k]
        y[k+1] = y[k] + vel_y[k]*step[k]
    # corner a sits half a width off the reference axis; b, c, d follow
    # around the box using the per-frame heading
    xa = x + w/2*sin(theta)
    ya = y - w/2*cos(theta)
    xb = xa + l*cos(theta)
    yb = ya + l*sin(theta)
    xc = xb - w*sin(theta)
    yc = yb + w*cos(theta)
    xd = xa - w*sin(theta)
    yd = ya + w*cos(theta)
    boxes = np.stack([xa,ya,xb,yb,xc,yc,xd,yd], axis=-1)
    return (boxes, x, y, accel) if outputall else boxes
def calculate_score(Y1,Yre):
    """Mean corner-to-corner L2 distance between measured boxes Y1 and
    reconstructed boxes Yre (rows with NaN norms are ignored).
    """
    residual = Y1 - Yre
    return np.nanmean(LA.norm(residual, axis=1))
def score_for_box(w,l,Y):
    '''
    Find the min score of a box of fixed w, l with respect to measurement Y.
    Y: 1x8 array of the four bottom corners (xa,ya,xb,yb,xc,yc,xd,yd).

    The equality constraints force the optimized 8-vector to be a perfect
    rectangle of length l and width w (opposite sides coincide pairwise).
    :return: the scipy OptimizeResult of the SLSQP solve.
    '''
    eq_cons = {'type': 'eq',
            'fun' : lambda x: np.array([
                                (x[2]-x[0])**2-l**2,
                                (x[1]-x[7])**2-w**2,
                                (x[0]-x[6])**2,
                                (x[2]-x[4])**2,
                                (x[1]-x[3])**2,
                                (x[5]-x[7])**2])}
    X0 = Y[0]
    # X0 += np.random.normal(0, 0.1, X0.shape)
    # BUG FIX: args must be a tuple. The original passed `(Y)` -- which is
    # just Y -- so scipy unpacked the *rows* of Y as extra positional
    # arguments, and calculate_score received a 1-D array (LA.norm axis=1
    # then fails). With args=(Y,) the 8-vector x broadcasts against the
    # (1, 8) measurement as intended.
    res = minimize(calculate_score, X0, args=(Y,), method = 'SLSQP',constraints=[eq_cons],
                    options={'disp': False})
    print(res.fun)
    # plot_track(Y.reshape((1,8)))
    # plot_track(res.x.reshape((1,8)))
    return res
|
"""Tests for the logging transactions module focusing on threads"""
from threading import Thread
from ska.log_transactions import transaction
from tests.conftest import get_all_record_logs, clear_logger_logs
class ThreadingLogsGenerator:
    """Generate logs by spawning a number of threads and logging in them.

    Each of 10 indices runs three worker threads: one logging inside a
    transaction, one logging with no transaction, and one whose transaction
    is interrupted by a handled RuntimeError.
    """

    def __init__(self, logger=None, pass_logger=False):
        self.logger = logger
        self.pass_logger = pass_logger

    def thread_with_transaction_exception(self, thread_index):
        # Forward the logger into transaction() only when configured to.
        logger = self.logger if self.pass_logger else None
        try:
            with transaction(f"Transaction thread [{thread_index}]", logger=logger):
                self.logger.info(
                    f"Transaction thread in transaction [{thread_index}], in transaction"
                )
                raise RuntimeError("An exception has occurred")
        except RuntimeError:
            pass

    def thread_with_transaction(self, thread_index):
        logger = self.logger if self.pass_logger else None
        with transaction(f"Transaction thread [{thread_index}]", logger=logger):
            self.logger.info(f"Transaction thread [{thread_index}], in transaction")
        self.logger.info(f"Thread log [{thread_index}], no transaction")

    def thread_without_transaction(self, thread_index):
        self.logger.info(f"Thread log [{thread_index}], no transaction")

    def get_logs(self):
        """Run all worker threads to completion and return the log records."""
        clear_logger_logs(self.logger)
        targets = (
            self.thread_with_transaction,
            self.thread_without_transaction,
            self.thread_with_transaction_exception,
        )
        workers = []
        for thread_index in range(10):
            workers.extend(
                Thread(target=target, args=(thread_index,)) for target in targets
            )
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()
        return get_all_record_logs(self.logger)
|
from secml.ml.features.tests import CPreProcessTestCases
from collections import OrderedDict
try:
import torch
import torchvision
except ImportError:
CPreProcessTestCases.importskip("torch")
CPreProcessTestCases.importskip("torchvision")
else:
import torch
from torch import nn, optim
from torchvision import transforms
torch.manual_seed(0)
from secml.array import CArray
from secml.ml.features.normalization import CNormalizerDNN
from secml.ml.classifiers import CClassifierPyTorch
from secml.data.loader import CDLRandom
from secml.optim.function import CFunction
def mlp(input_dims=100, hidden_dims=(50, 50), output_dims=10):
    """Multi-layer Perceptron.

    Builds ``linear1, relu1, linear2, relu2, ..., linearN`` as an ordered
    ``torch.nn.Sequential``; the output layer has no activation.
    """
    if len(hidden_dims) < 1:
        raise ValueError("at least one hidden dim should be defined")
    if any(d <= 0 for d in hidden_dims):
        raise ValueError("each hidden layer must have at least one neuron")
    layers = OrderedDict()
    # hidden stack: one Linear + ReLU pair per hidden layer
    fan_in = input_dims
    for idx, fan_out in enumerate(hidden_dims, start=1):
        layers['linear' + str(idx)] = torch.nn.Linear(fan_in, fan_out)
        layers['relu' + str(idx)] = torch.nn.ReLU()
        fan_in = fan_out
    # output head (no activation)
    layers['linear' + str(len(hidden_dims) + 1)] = \
        torch.nn.Linear(fan_in, output_dims)
    return torch.nn.Sequential(layers)
class TestCNormalizerPyTorch(CPreProcessTestCases):
    """Unit tests for CNormalizerDNN wrapping a small PyTorch MLP."""
    @classmethod
    def setUpClass(cls):
        # Shared fixtures: a fixed-seed synthetic dataset and a small
        # pretrained 1-hidden-layer network wrapped by the normalizer.
        cls.ds = CDLRandom(n_samples=40, n_classes=3,
                           n_features=20, n_informative=15,
                           random_state=0).load()
        model = mlp(input_dims=20, hidden_dims=(40,), output_dims=3)
        loss = nn.CrossEntropyLoss()
        optimizer = optim.SGD(model.parameters(), lr=1e-1)
        cls.net = CClassifierPyTorch(model=model, loss=loss,
                                     optimizer=optimizer, random_state=0,
                                     epochs=10, pretrained=True)
        cls.net.fit(cls.ds.X, cls.ds.Y)
        cls.norm = CNormalizerDNN(net=cls.net)
        CPreProcessTestCases.setUpClass()
    def test_normalization(self):
        """Testing normalization."""
        x = self.ds.X[0, :]
        # out_layer=None means "take the network's final output"
        self.logger.info("Testing normalization at last layer")
        self.norm.out_layer = None
        out_norm = self.norm.transform(x)
        out_net = self.net.get_layer_output(x, layer=None)
        self.logger.info("Output of normalize:\n{:}".format(out_norm))
        self.logger.info("Output of net:\n{:}".format(out_net))
        # the normalizer must reproduce the wrapped net's layer output
        self.assert_allclose(out_norm, out_net)
        self.norm.out_layer = 'linear1'
        self.logger.info(
            "Testing normalization at layer {:}".format(self.norm.out_layer))
        out_norm = self.norm.transform(x)
        out_net = self.net.get_layer_output(x, layer=self.norm.out_layer)
        self.logger.info("Output of normalize:\n{:}".format(out_norm))
        self.logger.info("Output of net:\n{:}".format(out_net))
        self.assert_allclose(out_norm, out_net)
    def test_chain(self):
        """Test for preprocessors chain."""
        # Inner preprocessors should be passed to the pytorch clf
        with self.assertRaises(ValueError):
            CNormalizerDNN(net=self.net, preprocess='min-max')
    def test_gradient(self):
        """Test for gradient."""
        x = self.ds.X[0, :]
        # gradient at the last layer, seeded with a one-hot w vector
        layer = None
        self.norm.out_layer = layer
        self.logger.info("Returning gradient for layer: {:}".format(layer))
        shape = self.norm.transform(x).shape
        w = CArray.zeros(shape=shape)
        w[0] = 1
        grad = self.norm.gradient(x, w=w)
        self.logger.info("Output of gradient_f_x:\n{:}".format(grad))
        # gradient lives in input space: vector-like, same size as x
        self.assertTrue(grad.is_vector_like)
        self.assertEqual(x.size, grad.size)
        # gradient at an intermediate layer, seeded with that layer's output
        layer = 'linear1'
        self.norm.out_layer = layer
        self.logger.info("Returning output for layer: {:}".format(layer))
        out = self.net.get_layer_output(x, layer=layer)
        self.logger.info("Returning gradient for layer: {:}".format(layer))
        grad = self.norm.gradient(x, w=out)
        self.logger.info("Output of grad_f_x:\n{:}".format(grad))
        self.assertTrue(grad.is_vector_like)
        self.assertEqual(x.size, grad.size)
    def test_aspreprocess(self):
        """Test for normalizer used as preprocess."""
        from secml.ml.classifiers import CClassifierSVM
        from secml.ml.classifiers.multiclass import CClassifierMulticlassOVA
        # fresh net (with its own min-max preprocess) wrapped as the
        # preprocess of a multiclass SVM
        model = mlp(input_dims=20, hidden_dims=(40,), output_dims=3)
        loss = nn.CrossEntropyLoss()
        optimizer = optim.SGD(model.parameters(), lr=1e-1)
        net = CClassifierPyTorch(model=model, loss=loss,
                                 optimizer=optimizer, random_state=0,
                                 epochs=10, preprocess='min-max')
        net.fit(self.ds.X, self.ds.Y)
        norm = CNormalizerDNN(net=net)
        clf = CClassifierMulticlassOVA(
            classifier=CClassifierSVM, preprocess=norm)
        self.logger.info("Testing last layer")
        clf.fit(self.ds.X, self.ds.Y)
        y_pred, scores = clf.predict(
            self.ds.X, return_decision_function=True)
        self.logger.info("TRUE:\n{:}".format(self.ds.Y.tolist()))
        self.logger.info("Predictions:\n{:}".format(y_pred.tolist()))
        self.logger.info("Scores:\n{:}".format(scores))
        x = self.ds.X[0, :]
        self.logger.info("Testing last layer gradient")
        # analytical gradient must match the numerical one per class
        for c in self.ds.classes:
            self.logger.info("Gradient w.r.t. class {:}".format(c))
            grad = clf.grad_f_x(x, y=c)
            self.logger.info("Output of grad_f_x:\n{:}".format(grad))
            check_grad_val = CFunction(
                clf.decision_function, clf.grad_f_x).check_grad(
                    x, y=c, epsilon=1e-1)
            self.logger.info(
                "norm(grad - num_grad): %s", str(check_grad_val))
            self.assertLess(check_grad_val, 1e-3)
            self.assertTrue(grad.is_vector_like)
            self.assertEqual(x.size, grad.size)
        # repeat prediction/gradient checks at an intermediate layer
        layer = 'linear1'
        norm.out_layer = layer
        self.logger.info("Testing layer {:}".format(norm.out_layer))
        clf.fit(self.ds.X, self.ds.Y)
        y_pred, scores = clf.predict(
            self.ds.X, return_decision_function=True)
        self.logger.info("TRUE:\n{:}".format(self.ds.Y.tolist()))
        self.logger.info("Predictions:\n{:}".format(y_pred.tolist()))
        self.logger.info("Scores:\n{:}".format(scores))
        self.logger.info("Testing 'linear1' layer gradient")
        grad = clf.grad_f_x(x, y=0)  # y is required for multiclassova
        self.logger.info("Output of grad_f_x:\n{:}".format(grad))
        self.assertTrue(grad.is_vector_like)
        self.assertEqual(x.size, grad.size)
# Run this test module directly via the framework's entry point.
if __name__ == '__main__':
    CPreProcessTestCases.main()
|
########################################################################################################################
__doc__ = \
"""
Transpiler module for Michelanglo --- IO parts plus basic cleaning of PDB block
"""
__author__ = "Matteo Ferla. [Github](https://github.com/matteoferla)"
__email__ = "matteo.ferla@gmail.com"
__date__ = "2019 A.D."
__license__ = "MIT"
__version__ = "3"
__citation__ = "Ferla et al. (2020) MichelaNGLo: sculpting protein views on web pages without coding. Bioinformatics"
########################################################################################################################
from warnings import warn
from pprint import PrettyPrinter
pprint = PrettyPrinter().pprint
import pymol2, re
###############################################################
class PyMolTranspiler_io:
    """IO mixin for the PyMOL transpiler: load a PDB file, sanitise the
    structure and (re)generate secondary-structure records.

    NOTE(review): this mixin reads attributes/methods it does not define
    (``temporary_folder``, ``_iterate_cmd``, ``get_new_letter``,
    ``get_chains``, ``headers``, ``ss``) -- it is expected to be composed
    into the full transpiler class that provides them.
    """
    def load_pdb(self, file, outfile=None, mod_fx=None):
        """
        Loads a pdb file into a transpiler obj. and fixes it.
        The round trip is to prevent anything malicious being sent.
        :param file: str file name
        :param outfile: optional output name; defaults to ``file`` for .pdb
            input, otherwise to the input name with a .pdb extension
        :param mod_fx: optional callable run inside the PyMOL session
        :return: self
        **PyMOL session**: self-contained.
        """
        with pymol2.PyMOL() as self.pymol: #pymol2.PyMOL()
            self.pymol.cmd.set('fetch_path', self.temporary_folder)
            self.pymol.cmd.load(file)
            extension = file.split('.')[-1]
            headers = []
            gather_ss = True
            if extension == 'pdb':
                # keep (sanitised) LINK/HELIX/SHEET header rows from the file
                with open(file) as w:
                    headers = [row.replace('"','').replace("'",'').replace("\\",'') for row in w if any([k in row for k in ('LINK', 'HELIX', 'SHEET')])]
                # BUG FIX: the original test was
                # ``any(['HELIX', 'SHEET' in headers])`` which always
                # evaluates True (it tests the non-empty literal 'HELIX').
                # Only skip SS regeneration when HELIX/SHEET rows exist.
                if any(('HELIX' in row) or ('SHEET' in row) for row in headers):
                    gather_ss = False
                if outfile is None:
                    outfile = file
            else:
                if outfile is None:
                    outfile = '.'.join(file.split('.')[:-1])+'.pdb'
            if mod_fx:
                mod_fx()
            self.raw_pdb = self.remove_anisou(self.pymol.cmd.get_pdbstr())
            ## fix the segi and multiple object problem.
            self.fix_structure()
            ## add SS
            if gather_ss:
                myspace = {'data': []}
                # myspace['data'] is the same as self.atoms, which is "kind of the same" as pymol.cmd.get_model('..').atoms
                self.pymol.cmd.iterate('all', self._iterate_cmd, space=myspace)
                self.parse_ss(myspace['data'])
                self.raw_pdb = '\n'.join(self.ss)+'\n'+ self.raw_pdb
            else:
                self.raw_pdb = '\n'.join(headers)+'\n'+ self.raw_pdb
            return self
    @property
    def pdb_block(self):
        """Headers (or regenerated SS records) plus the ANISOU-free PDB body."""
        if self.raw_pdb == '':
            warn('raw_PDB is empty')
            self.raw_pdb = self.pymol.cmd.get_pdbstr()
        # NOTE(review): ``self.headers`` is never assigned in this mixin --
        # it must come from the composed class; verify before standalone use
        if self.headers:
            return '\n'.join(self.headers) + '\n' + self.remove_anisou(self.raw_pdb.lstrip())
        else:
            return '\n'.join(self.ss) + '\n' + self.remove_anisou(self.raw_pdb.lstrip())
    def fix_structure(self):
        """
        Fix any issues with structure. see self.pymol_model_chain_segi.md for more.
        empty chain issue.
        **PyMOL session**: dependent. Requires sigleton.
        """
        # whereas a chain can be called ?, it causes problems. So these are strictly JS \w characters.
        # Only latin-1 is okay in NGL. Any character above U+00FF will be rendered as the last two bytes. (U+01FF will be U+00FF say)
        #non-ascii are not okay in PyMol
        chaingen = self.get_new_letter()
        objs = self.pymol.cmd.get_names(enabled_only=1)
        prime_chains = self.get_chains(objs[0])
        # give every chain of the secondary objects a unique letter
        for on in objs[1:]:
            for c in self.get_chains(on):
                if not c: # missing chain ID is still causing issues.
                    new_chain = next(chaingen)
                    self.pymol.cmd.alter(f"{on} and chain ''", f'chain="{new_chain}"')
                elif c in prime_chains:
                    new_chain = next(chaingen)
                    self.pymol.cmd.alter(f"{on} and chain {c}", f'chain="{new_chain}"')
                else:
                    prime_chains.add(c)
        # selenomethionine to methionine
        if self.pymol.cmd.select('resn MSE') > 0:
            self.pymol.cmd.alter('resn MSE and element SE', 'name=" SD "')
            self.pymol.cmd.alter('resn MSE and element SE', 'element="S"')
            self.pymol.cmd.alter('resn MSE', 'resn="MET"')
            self.pymol.cmd.sort('all')
        self.pymol.cmd.alter("all", "segi=''") # not needed. NGL does not recognise segi. Currently writtten to ignore it.
        self.pymol.cmd.sort('all')
        # The delete states shortcut does not work:
        self.pymol.cmd.create('mike_combined','enabled',1) #the 1 means that only the first "state" = model is used.
        for on in self.pymol.cmd.get_names_of_type('object:molecule'):
            if on != 'mike_combined':
                self.pymol.cmd.delete(on)
    def parse_ss(self, data=None, **settings):
        """
        PDB block Secondary structure maker.
        Walks the CA atoms and emits HELIX/SHEET records into ``self.ss``.
        """
        def _deal_with():
            # flush the run of secondary structure that just ended
            if ss_last == 'H': # previous was the other type
                self.ss.append('{typos} {ss_count: >3} {ss_count: >3} {resn_start} {chain} {resi_start: >4} {resn_end} {chain} {resi_end: >4} {h_class: >2} {length: >2}'.format(
                    typos='HELIX',
                    ss_count=ss_count[ss_last],
                    resn_start=resn_start,
                    resi_start=resi_start,
                    resn_end=resn_last,
                    resi_end=resi_last,
                    chain=chain,
                    h_class=1,
                    length=int(resi_last) - int(resi_start) # already polished
                ))
                ss_count[ss_last] += 1
            elif ss_last == 'S': # previous was the other type
                self.ss.append('{typos} {ss_count: >3} {ss_count: >2}S 1 {resn_start} {chain}{resi_start: >4} {resn_end} {chain}{resi_end: >4} 0'.format(
                    typos='SHEET',
                    ss_count=ss_count[ss_last],
                    resn_start=resn_start,
                    resi_start=resi_start,
                    resn_end=resn_last,
                    resi_end=resi_last,
                    chain=chain,
                    h_class=0,
                    length=resi_last - resi_start
                ))
                ss_count[ss_last] += 1
        self.ss = []
        if data is None:
            myspace = {'data': []}
            self.pymol.cmd.iterate('all', self._iterate_cmd, space=myspace)
            data = myspace['data']
        ss_last = 'L'
        resi_start = '0'
        resn_start = 'XXX'
        resi_last = '0'
        resn_last = 'XXX'
        ss_count = {'H': 1, 'S': 1, 'L': 0}
        chain = 'X'
        # strip insertion codes: keep only the numeric part of resi
        icode_polish = lambda resi: int(re.search(r'(\d+)', resi).group(1))
        for line in data: # ss_list:
            if line['name'] == 'CA':
                (resi_this, ss_this, resn_this, chain) = (line['resi'], line['ss'], line['resn'], line['chain'])
                if ss_last != ss_this:
                    # deal with previous first
                    _deal_with()
                    # deal with current
                    if ss_this in ('S', 'H'): # start of a new
                        resi_start = icode_polish(resi_this)
                        resn_start = resn_this
                        ss_last = ss_this
                # move on
                resi_last = icode_polish(resi_this)
                resn_last = resn_this
                ss_last = ss_this
        _deal_with()
        return self
    @staticmethod
    def remove_anisou(block):
        """Drop all ANISOU records from a PDB block."""
        return '\n'.join([r for r in block.split('\n') if 'ANISOU' not in r])
|
import numpy as np
from core.transform import Transform
class Mesh:
    """Triangle mesh holding packed vertex/normal buffers.

    Vertex and normal coordinates are concatenated into ``coords``; the
    normal indices are shifted by the vertex count so both index lists
    address the combined buffer.
    """

    def __init__(self, name, vertices, vertex_indices, normals, normal_indices, transform=None):
        self.name = name
        # triangle-mesh invariants: indices come in triples, one normal
        # index per vertex index
        assert len(vertex_indices) % 3 == 0
        assert len(normal_indices) == len(vertex_indices)
        vertex_array = np.array(vertices, dtype=np.single)
        normal_array = np.array(normals, dtype=np.single)
        vertex_idx = np.array(vertex_indices, dtype=np.uint32)
        # normals live after the vertices inside the packed buffer
        normal_idx = np.array(normal_indices, dtype=np.uint32) + len(vertices)
        self.v, self.n = vertex_array, normal_array
        self.vi, self.ni = vertex_idx, normal_idx
        self.coords = np.concatenate((vertex_array, normal_array))
        self.indices = np.concatenate((vertex_idx, normal_idx))
        self.normal_index_offset = len(vertex_indices)
        self.transform = transform or Transform()
|
import tensorflow as tf
import numpy as np
import cv2
def extract_person(image, bbox, person_image_shape):
    """Crop the person described by ``bbox`` out of ``image`` and resize it.

    :param image: HxWxC frame array.
    :param bbox: (x, y, width, height) box; widened so its aspect ratio
        matches ``person_image_shape`` before cropping.
    :param person_image_shape: (height, width) of the returned patch.
    :return: the resized crop, or None when the adjusted box is empty
        after clipping to the image bounds.
    """
    bbox = np.array(bbox)
    # widen (or narrow) the box so its aspect ratio matches the target patch
    new_width = float(person_image_shape[1]) / person_image_shape[0] * bbox[3]
    bbox[0] -= (new_width - bbox[2]) / 2
    bbox[2] = new_width
    # convert (x, y, w, h) -> (x1, y1, x2, y2)
    bbox[2:] += bbox[:2]
    # BUG FIX: np.int was removed in NumPy 1.24; use the builtin int dtype
    bbox = bbox.astype(int)
    # clip to the image bounds
    bbox[:2] = np.maximum(0, bbox[:2])
    bbox[2:] = np.minimum(np.asarray(image.shape[:2][::-1]) - 1, bbox[2:])
    if np.any(bbox[:2] >= bbox[2:]):
        return None
    image = image[bbox[1]:bbox[3], bbox[0]:bbox[2]]
    image = cv2.resize(image, tuple(person_image_shape[::-1]))
    return image
class Encoder(object):
    """Appearance-feature encoder backed by a frozen TensorFlow 1.x graph.

    Loads a serialized GraphDef from ``checkpoint_filename`` and exposes its
    ``net/images`` input and ``net/features`` output tensors.
    """
    def __init__(self, checkpoint_filename):
        self.sess = tf.Session()
        # read the frozen graph (protobuf GraphDef) and import it under the
        # "net" name scope
        with tf.gfile.GFile(checkpoint_filename, "rb") as file_handle:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(file_handle.read())
        tf.import_graph_def(graph_def, name="net")
        self.input_var = tf.get_default_graph().get_tensor_by_name("net/images:0")
        self.output_var = tf.get_default_graph().get_tensor_by_name("net/features:0")
        # input shape without the batch dimension; output feature dimensionality
        self.image_shape = self.input_var.get_shape().as_list()[1:]
        self.feature_dim = self.output_var.get_shape().as_list()[-1]
    def encode(self, image, boxes, batch_size):
        """Return one feature row per box, running the network in batches.

        Boxes whose crop is empty after clipping are replaced by random
        noise patches so the output keeps one row per input box.
        """
        person_images = []
        for bbox in boxes:
            person_image = extract_person(image, bbox, self.image_shape[:2])
            if person_image is None:
                # degenerate box: substitute a random patch as a placeholder
                person_image = np.random.uniform(0., 255., self.image_shape).astype(np.uint8)
            person_images.append(person_image)
        person_images = np.asarray(person_images)
        features = np.zeros((person_images.shape[0], self.feature_dim), np.float32)
        # run the net batch_size images at a time to bound memory use
        for i in range(0, features.shape[0], batch_size):
            features[i:i + batch_size] = self.sess.run(self.output_var,
                                                       feed_dict={self.input_var: person_images[i:i + batch_size]})
        return features
|
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## NUS School of Computing
## Email: yaoyao.liu@u.nus.edu
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import os
import random
import numpy as np
import scipy.misc as scm
from tqdm import trange
class TieredImageNetDataLoader(object):
    """Episode loader for tiered-ImageNet few-shot experiments.

    Samples N-way K-shot episodes from class folders under
    ``./processed_images/{train,val,test}`` and caches the sampled
    filename/label lists as .npy files so episodes are reproducible.
    """
    def __init__(self, shot_num, way_num, episode_test_sample_num):
        self.shot_num = shot_num
        self.way_num = way_num
        self.episode_test_sample_num = episode_test_sample_num
        # per class: K support (shot) + Q query (test) samples
        self.num_samples_per_class = episode_test_sample_num + shot_num
        metatrain_folder = './processed_images/train'
        metaval_folder = './processed_images/val'
        metatest_folder = './processed_images/test'
        npy_dir = './episode_filename_list/'
        if not os.path.exists(npy_dir):
            os.mkdir(npy_dir)
        self.npy_base_dir = npy_dir + str(self.shot_num) + 'shot_' + str(self.way_num) + 'way/'
        if not os.path.exists(self.npy_base_dir):
            os.mkdir(self.npy_base_dir)
        # one folder per class in each split
        self.metatrain_folders = [os.path.join(metatrain_folder, label) \
            for label in os.listdir(metatrain_folder) \
            if os.path.isdir(os.path.join(metatrain_folder, label)) \
            ]
        self.metaval_folders = [os.path.join(metaval_folder, label) \
            for label in os.listdir(metaval_folder) \
            if os.path.isdir(os.path.join(metaval_folder, label)) \
            ]
        self.metatest_folders = [os.path.join(metatest_folder, label) \
            for label in os.listdir(metatest_folder) \
            if os.path.isdir(os.path.join(metatest_folder, label)) \
            ]
    def get_images(self, paths, labels, nb_samples=None, shuffle=True):
        """Return (label, filepath) pairs sampled from the given class dirs."""
        if nb_samples is not None:
            sampler = lambda x: random.sample(x, nb_samples)
        else:
            sampler = lambda x: x
        images = [(i, os.path.join(path, image)) \
            for i, path in zip(labels, paths) \
            for image in sampler(os.listdir(path))]
        if shuffle:
            random.shuffle(images)
        return images
    def generate_data_list(self, phase='train', episode_num=None):
        """Sample ``episode_num`` episodes for ``phase`` and cache the
        filename/label lists to .npy (skipped when the cache exists).

        NOTE(review): only the labels of the *last* sampled episode are
        saved; all episodes share the same label layout since shuffle=False,
        but this should be confirmed against the consumers.
        """
        if phase=='train':
            folders = self.metatrain_folders
            if episode_num is None:
                episode_num = 20000
            if not os.path.exists(self.npy_base_dir+'/train_filenames.npy'):
                print('Generating train filenames')
                all_filenames = []
                for _ in trange(episode_num):
                    sampled_character_folders = random.sample(folders, self.way_num)
                    random.shuffle(sampled_character_folders)
                    labels_and_images = self.get_images(sampled_character_folders, range(self.way_num), nb_samples=self.num_samples_per_class, shuffle=False)
                    labels = [li[0] for li in labels_and_images]
                    filenames = [li[1] for li in labels_and_images]
                    all_filenames.extend(filenames)
                np.save(self.npy_base_dir+'/train_labels.npy', labels)
                np.save(self.npy_base_dir+'/train_filenames.npy', all_filenames)
                print('Train filename and label lists are saved')
        elif phase=='val':
            folders = self.metaval_folders
            if episode_num is None:
                episode_num = 600
            if not os.path.exists(self.npy_base_dir+'/val_filenames.npy'):
                print('Generating val filenames')
                all_filenames = []
                for _ in trange(episode_num):
                    sampled_character_folders = random.sample(folders, self.way_num)
                    random.shuffle(sampled_character_folders)
                    labels_and_images = self.get_images(sampled_character_folders, range(self.way_num), nb_samples=self.num_samples_per_class, shuffle=False)
                    labels = [li[0] for li in labels_and_images]
                    filenames = [li[1] for li in labels_and_images]
                    all_filenames.extend(filenames)
                np.save(self.npy_base_dir+'/val_labels.npy', labels)
                np.save(self.npy_base_dir+'/val_filenames.npy', all_filenames)
                print('Val filename and label lists are saved')
        elif phase=='test':
            folders = self.metatest_folders
            if episode_num is None:
                episode_num = 600
            if not os.path.exists(self.npy_base_dir+'/test_filenames.npy'):
                print('Generating test filenames')
                all_filenames = []
                for _ in trange(episode_num):
                    sampled_character_folders = random.sample(folders, self.way_num)
                    random.shuffle(sampled_character_folders)
                    labels_and_images = self.get_images(sampled_character_folders, range(self.way_num), nb_samples=self.num_samples_per_class, shuffle=False)
                    labels = [li[0] for li in labels_and_images]
                    filenames = [li[1] for li in labels_and_images]
                    all_filenames.extend(filenames)
                np.save(self.npy_base_dir+'/test_labels.npy', labels)
                np.save(self.npy_base_dir+'/test_filenames.npy', all_filenames)
                print('Test filename and label lists are saved')
        else:
            print('Please select vaild phase')
    def load_list(self, phase='train'):
        """Load the cached filename/label lists for the given phase(s)."""
        if phase=='train':
            self.train_filenames = np.load(self.npy_base_dir + 'train_filenames.npy').tolist()
            self.train_labels = np.load(self.npy_base_dir + 'train_labels.npy').tolist()
        elif phase=='val':
            self.val_filenames = np.load(self.npy_base_dir + 'val_filenames.npy').tolist()
            self.val_labels = np.load(self.npy_base_dir + 'val_labels.npy').tolist()
        elif phase=='test':
            self.test_filenames = np.load(self.npy_base_dir + 'test_filenames.npy').tolist()
            self.test_labels = np.load(self.npy_base_dir + 'test_labels.npy').tolist()
        elif phase=='all':
            self.train_filenames = np.load(self.npy_base_dir + 'train_filenames.npy').tolist()
            self.train_labels = np.load(self.npy_base_dir + 'train_labels.npy').tolist()
            self.val_filenames = np.load(self.npy_base_dir + 'val_filenames.npy').tolist()
            self.val_labels = np.load(self.npy_base_dir + 'val_labels.npy').tolist()
            self.test_filenames = np.load(self.npy_base_dir + 'test_filenames.npy').tolist()
            self.test_labels = np.load(self.npy_base_dir + 'test_labels.npy').tolist()
        else:
            print('Please select vaild phase')
    def process_batch(self, input_filename_list, input_label_list, batch_sample_num, reshape_with_one=True):
        """Load the listed images, interleave classes, and return
        (image_array, one_hot_label_array) for one episode split."""
        new_path_list = []
        new_label_list = []
        # interleave: one sample per class per round, class order shuffled
        for k in range(batch_sample_num):
            class_idxs = list(range(0, self.way_num))
            random.shuffle(class_idxs)
            for class_idx in class_idxs:
                true_idx = class_idx*batch_sample_num + k
                new_path_list.append(input_filename_list[true_idx])
                new_label_list.append(input_label_list[true_idx])
        img_list = []
        for filepath in new_path_list:
            # NOTE(review): scipy.misc.imread was removed in SciPy 1.2+;
            # consider imageio.imread when upgrading
            this_img = scm.imread(filepath)
            this_img = this_img / 255.0
            img_list.append(this_img)
        if reshape_with_one:
            img_array = np.array(img_list)
            # BUG FIX: the original called bare ``one_hot`` (NameError);
            # the method lives on self, as in the branch below
            label_array = self.one_hot(np.array(new_label_list)).reshape([1, self.way_num*batch_sample_num, -1])
        else:
            img_array = np.array(img_list)
            label_array = self.one_hot(np.array(new_label_list)).reshape([self.way_num*batch_sample_num, -1])
        return img_array, label_array
    def one_hot(self, inp):
        """One-hot encode an int array; width is max(inp)+1."""
        n_class = inp.max() + 1
        n_sample = inp.shape[0]
        out = np.zeros((n_sample, n_class))
        for idx in range(n_sample):
            out[idx, inp[idx]] = 1
        return out
    def get_batch(self, phase='train', idx=0):
        """Return (support_images, support_labels, query_images, query_labels)
        for episode ``idx`` of the given phase."""
        if phase=='train':
            all_filenames = self.train_filenames
            labels = self.train_labels
        elif phase=='val':
            all_filenames = self.val_filenames
            labels = self.val_labels
        elif phase=='test':
            all_filenames = self.test_filenames
            labels = self.test_labels
        else:
            print('Please select vaild phase')
        one_episode_sample_num = self.num_samples_per_class*self.way_num
        this_task_filenames = all_filenames[idx*one_episode_sample_num:(idx+1)*one_episode_sample_num]
        epitr_sample_num = self.shot_num
        epite_sample_num = self.episode_test_sample_num
        this_task_tr_filenames = []
        this_task_tr_labels = []
        this_task_te_filenames = []
        this_task_te_labels = []
        # split each class's samples into support (first K) and query (rest)
        for class_k in range(self.way_num):
            this_class_filenames = this_task_filenames[class_k*self.num_samples_per_class:(class_k+1)*self.num_samples_per_class]
            this_class_label = labels[class_k*self.num_samples_per_class:(class_k+1)*self.num_samples_per_class]
            this_task_tr_filenames += this_class_filenames[0:epitr_sample_num]
            this_task_tr_labels += this_class_label[0:epitr_sample_num]
            this_task_te_filenames += this_class_filenames[epitr_sample_num:]
            this_task_te_labels += this_class_label[epitr_sample_num:]
        this_inputa, this_labela = self.process_batch(this_task_tr_filenames, this_task_tr_labels, epitr_sample_num, reshape_with_one=False)
        this_inputb, this_labelb = self.process_batch(this_task_te_filenames, this_task_te_labels, epite_sample_num, reshape_with_one=False)
        return this_inputa, this_labela, this_inputb, this_labelb
|
import copy
import torch
import torch.nn as nn
class Transformer(nn.Module):
    """DETR-style transformer: a stack of encoder layers over the source
    features followed by a stack of decoder layers driven by object queries.
    """

    def __init__(self, d_model=512, nhead=8, num_encoders=6, num_decoders=6, dim_feedforward=2048, dropout=0.1):
        super(Transformer, self).__init__()
        self.encoder = TransformerEncoder(
            TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout),
            num_encoders,
        )
        self.decoder = TransformerDecoder(
            TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout),
            num_decoders,
        )
        self.d_model = d_model
        self._reset_parameters()

    def _reset_parameters(self):
        # Xavier-init every weight matrix; leave 1-D params (biases) alone.
        for param in self.parameters():
            if param.dim() > 1:
                nn.init.xavier_uniform_(param)

    def forward(self, src, pos_embed, query_embed):
        # the decoder target starts as zeros shaped like the query embeddings
        query_seed = torch.zeros_like(query_embed)
        memory = self.encoder(src, pos_embed)
        return self.decoder(query_seed, memory, pos_embed, query_embed)
class TransformerEncoder(nn.Module):
    """Applies ``num_layers`` independent deep copies of ``encoder_layer``
    in sequence, threading the positional embedding into every layer."""

    def __init__(self, encoder_layer, num_layers):
        super(TransformerEncoder, self).__init__()
        clones = [copy.deepcopy(encoder_layer) for _ in range(num_layers)]
        self.layers = nn.ModuleList(clones)

    def forward(self, src, pos):
        out = src
        for stage in self.layers:
            out = stage(out, pos)
        return out
class TransformerDecoder(nn.Module):
    """A stack of sequentially-applied decoder layers.

    Layers are independent deep copies of the prototype ``decoder_layer``.
    """

    def __init__(self, decoder_layer, num_layers):
        super(TransformerDecoder, self).__init__()
        clones = [copy.deepcopy(decoder_layer) for _ in range(num_layers)]
        self.layers = nn.ModuleList(clones)

    def forward(self, tgt, memory, pos, query_pos):
        """Refine ``tgt`` against ``memory`` through every layer."""
        out = tgt
        for stage in self.layers:
            out = stage(out, memory, pos, query_pos)
        return out
class TransformerEncoderLayer(nn.Module):
    """Single pre-norm encoder layer: self-attention then FFN, each with
    a residual connection and dropout.

    Positional embeddings are added to queries and keys only, not to the
    values (DETR convention).
    """

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1):
        super(TransformerEncoderLayer, self).__init__()
        # Third positional argument of MultiheadAttention is its dropout rate.
        self.self_attention_layer = nn.MultiheadAttention(d_model, nhead, dropout)
        self.attention_norm = nn.LayerNorm(d_model)
        self.attention_dropout = nn.Dropout(dropout)
        self.ffn = FeedForwardNetwork(d_model, dim_feedforward, dropout)
        self.ffn_norm = nn.LayerNorm(d_model)
        self.ffn_dropout = nn.Dropout(dropout)

    def forward(self, src, pos):
        # Pre-norm self-attention with residual; [0] keeps the attention
        # output and discards the weights.
        norm_src = self.attention_norm(src)
        attention_out = self.self_attention_layer(query=norm_src + pos, key=norm_src + pos, value=norm_src)[0]
        src = src + self.attention_dropout(attention_out)
        # Pre-norm feed-forward with residual.
        norm_src = self.ffn_norm(src)
        ffn_out = self.ffn(norm_src)
        src = src + self.ffn_dropout(ffn_out)
        return src
class TransformerDecoderLayer(nn.Module):
    """Single pre-norm decoder layer: self-attention over the targets,
    encoder-decoder cross-attention, then FFN — each sub-block with a
    residual connection and dropout.

    ``query_pos`` is added to decoder-side queries/keys; ``pos`` is added
    to the encoder memory keys (DETR convention).
    """

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1):
        super(TransformerDecoderLayer, self).__init__()
        # Third positional argument of MultiheadAttention is its dropout rate.
        self.self_attention_layer = nn.MultiheadAttention(d_model, nhead, dropout)
        self.self_attention_norm = nn.LayerNorm(d_model)
        self.self_attention_dropout = nn.Dropout(dropout)
        self.encoder_decoder_attention_layer = nn.MultiheadAttention(d_model, nhead, dropout)
        self.encoder_decoder_attention_norm = nn.LayerNorm(d_model)
        self.encoder_decoder_attention_dropout = nn.Dropout(dropout)
        self.ffn = FeedForwardNetwork(d_model, dim_feedforward, dropout)
        self.ffn_norm = nn.LayerNorm(d_model)
        self.ffn_dropout = nn.Dropout(dropout)

    def forward(self, tgt, memory, pos, query_pos):
        # Pre-norm self-attention over the decoder targets.
        norm_tgt = self.self_attention_norm(tgt)
        self_attention_out = self.self_attention_layer(query=norm_tgt + query_pos, key=norm_tgt + query_pos, value=norm_tgt)[0]
        tgt = tgt + self.self_attention_dropout(self_attention_out)
        # Cross-attention: positional args are (query, key, value).
        norm_tgt = self.encoder_decoder_attention_norm(tgt)
        encoder_decoder_attention_out = self.encoder_decoder_attention_layer(norm_tgt + query_pos, memory + pos, memory)[0]
        tgt = tgt + self.encoder_decoder_attention_dropout(encoder_decoder_attention_out)
        # Pre-norm feed-forward with residual.
        norm_tgt = self.ffn_norm(tgt)
        ffn_out = self.ffn(norm_tgt)
        tgt = tgt + self.ffn_dropout(ffn_out)
        return tgt
class FeedForwardNetwork(nn.Module):
    """Position-wise MLP: Linear -> ReLU -> Dropout -> Linear."""

    def __init__(self, d_model, dim_feedforward, dropout=0.1):
        super(FeedForwardNetwork, self).__init__()
        self.in_linear = nn.Linear(in_features=d_model, out_features=dim_feedforward)
        self.activation = nn.ReLU()
        self.dropout = nn.Dropout(p=dropout)
        self.out_linear = nn.Linear(in_features=dim_feedforward, out_features=d_model)

    def forward(self, x):
        """Expand to ``dim_feedforward``, apply ReLU and dropout, then
        project back to ``d_model``."""
        hidden = self.dropout(self.activation(self.in_linear(x)))
        return self.out_linear(hidden)
|
from flask import Flask, request, jsonify
from io import BytesIO
import os
from processor import processImage
app = Flask(__name__)
# Origin allowed in the CORS headers (the dev frontend server).
CORS_ORIGIN = 'http://localhost:8080'
# Directory processed images are written to, relative to the working dir.
OUTPUT_PATH = 'assets'
class InvalidFiletype(Exception):
    """Raised when an uploaded file has an unsupported extension.

    Carries an HTTP status code (default 400) and an optional payload
    mapping that is merged into the JSON error response.
    """

    status_code = 400

    def __init__(self, message, status_code=None, payload=None):
        Exception.__init__(self)
        self.message = message
        # Only override the class-level default when explicitly given.
        if status_code is not None:
            self.status_code = status_code
        self.payload = payload

    def to_dict(self):
        """Return the payload (if any) merged with the error message."""
        body = dict(self.payload) if self.payload else {}
        body['message'] = self.message
        return body
@app.errorhandler(InvalidFiletype)
def handle_invalid_filetype(error):
    """Convert an InvalidFiletype exception into a JSON error response
    carrying the exception's status code."""
    response = jsonify(error.to_dict())
    response.status_code = error.status_code
    return response
@app.route('/api/upload', methods=['OPTIONS'])
def upload_options():
    """Answer the CORS preflight for the upload endpoint.

    The JSON body is a placeholder; only the Access-Control-* headers
    matter to the browser.
    """
    print('setting up response for options')
    resp = jsonify({'foo': 'bar baz'})
    resp.headers.add('Access-Control-Allow-Origin', CORS_ORIGIN)
    resp.headers.add('Access-Control-Allow-Methods', 'POST,OPTIONS')
    resp.headers.add('Access-Control-Allow-Headers', 'Content-Type')
    resp.status_code = 200
    return resp
@app.route("/api/upload/<int:id>", methods=["POST"])
def upload(id):
print('Received uploads')
outputs = []
for upload in request.files.getlist('images'):
filename = upload.filename
print('processing file: ', filename)
ext = os.path.splitext(filename)[1][1:].strip().lower()
if ext in set(['jpg', 'jpeg', 'png', 'PNG']):
print('File supported moving on.')
else:
raise InvalidFiletype('Unsupported file type {}'.format(ext), status_code=415)
imageInBytes = BytesIO(upload.read())
outputImageProcessing = processImage(imageInBytes, OUTPUT_PATH)
outputs.append(outputImageProcessing)
response = jsonify({ 'uploaded': outputs })
response.headers.add('Access-Control-Allow-Origin', CORS_ORIGIN)
response.status_code = 200
# print(uploaded_files)
return response
if __name__ == '__main__':
    # Dev server on 5001 to avoid clashing with the frontend on 8080.
    app.run(port=5001)
|
from gcloud import storage
from gcloud.storage.blob import Blob
class xamoom_acl(object):
    """One ACL change: an actor (who) plus a right (what), applicable to
    a gcloud bucket or blob via :meth:`apply_acl`."""

    # actor kinds
    ALL = 0
    ALL_AUTH = 1
    DOMAIN = 2
    GROUP = 3
    USER = 4
    # rights
    GRANT_READ = 0
    GRANT_WRITE = 1
    GRANT_OWNER = 2
    REVOKE_READ = 3
    REVOKE_WRITE = 4
    REVOKE_OWNER = 5
    # class-level defaults; overwritten per instance in __init__
    actor = None
    actor_name = None
    right = None

    def __init__(self, actor, right, actor_name=None):
        self.actor = actor
        self.actor_name = actor_name
        self.right = right

    def apply_acl(self, obj):
        """Resolve the ACL entry for this actor on ``obj``, apply the
        configured right to it, then persist the ACL."""
        entry = None
        if self.actor == xamoom_acl.ALL:
            entry = obj.acl.all()
        elif self.actor == xamoom_acl.ALL_AUTH:
            entry = obj.acl.all_authenticated()
        elif self.actor == xamoom_acl.DOMAIN:
            entry = obj.acl.domain(self.actor_name)
        elif self.actor == xamoom_acl.GROUP:
            entry = obj.acl.group(self.actor_name)
        elif self.actor == xamoom_acl.USER:
            entry = obj.acl.user(self.actor_name)
        # Map the right constant to the entry method that applies it.
        method = {
            xamoom_acl.GRANT_READ: 'grant_read',
            xamoom_acl.GRANT_WRITE: 'grant_write',
            xamoom_acl.GRANT_OWNER: 'grant_owner',
            xamoom_acl.REVOKE_READ: 'revoke_read',
            xamoom_acl.REVOKE_WRITE: 'revoke_write',
            xamoom_acl.REVOKE_OWNER: 'revoke_owner',
        }.get(self.right)
        if method is not None:
            getattr(entry, method)()
        obj.acl.save()
class xamoom_storage(object):
    """Thin convenience wrapper around a gcloud ``storage.Client``."""

    def __init__(self):
        self.__client = storage.Client()

    def list_blobs(self, bucket_name, dir_only=False, prefix=None):
        """List blobs in ``bucket_name``.

        With ``dir_only=True``, lists only the ``prefix`` "directory"
        level (delimiter '/'). NOTE(review): ``prefix`` is ignored when
        ``dir_only`` is False — confirm whether that is intended.
        """
        bucket = self.__client.get_bucket(bucket_name)
        # add slash on end
        if prefix != None and prefix.endswith('/') == False:
            prefix = prefix + '/'
        if dir_only == True:
            return bucket.list_blobs(prefix = prefix, delimiter='/')
        else:
            return bucket.list_blobs()

    def copy_blob(self, blob, destination_bucket_name, new_name=None):
        """Copy ``blob`` into another bucket, optionally renaming it."""
        destination_bucket = self.__client.get_bucket(destination_bucket_name)
        blob.bucket.copy_blob(blob, destination_bucket, new_name=new_name)

    def upload_blob(self, source_path, destination_path,
                    destination_bucket_name, content_type):
        """Upload a local file to ``destination_path`` in the bucket."""
        destination_bucket = self.__client.get_bucket(destination_bucket_name)
        blob = Blob(destination_path, destination_bucket)
        with open(source_path, 'rb') as f:
            blob.upload_from_file(f, content_type=content_type)

    def read_blob(self,bucket_name,file_name,destination_file=None):
        """Read a blob: write it to ``destination_file`` when given
        (returns None in that case), otherwise return its contents."""
        blob = self.download_blob(bucket_name,file_name)
        if destination_file != None:
            with open(destination_file, 'wb') as f:
                blob.download_to_file(f)
        else:
            return blob.download_as_string()

    def download_blob(self, bucket_name, file_name):
        """Fetch a blob handle (None when it does not exist)."""
        bucket = self.__client.get_bucket(bucket_name)
        blob = bucket.get_blob(file_name)
        return blob

    def set_acl(self,bucket_name,acl,file_name=None):
        """Apply a xamoom_acl to the bucket, or to a single blob when
        ``file_name`` is given."""
        obj = self.__client.get_bucket(bucket_name)
        if file_name != None:
            obj = obj.get_blob(file_name)
        acl.apply_acl(obj)
|
import os, logging
from flask import Flask
from flask.ext.basicauth import BasicAuth
app = Flask(__name__)
# SETTINGS names the config object to load (e.g. a settings module path).
app.config.from_object(os.environ.get('SETTINGS'))
# Force HTTP basic auth everywhere, but only when credentials are configured.
if app.config.get('BASIC_AUTH_USERNAME'):
    app.config['BASIC_AUTH_FORCE'] = True
    basic_auth = BasicAuth(app)
# Debug dump of the effective configuration at startup (Python 2 syntax).
print "============"
print app.config
print "============"
if not app.debug:
    # In production, log INFO and above to stderr.
    app.logger.addHandler(logging.StreamHandler())
    app.logger.setLevel(logging.INFO)
# jinja filter first name
def first_name(string):
    """Jinja filter: return the first whitespace-separated word of
    *string*, or '' for an empty/whitespace-only string.

    The previous implementation indexed [0] unconditionally and raised
    IndexError when the string contained no words.
    """
    parts = string.strip().split()
    return parts[0] if parts else ''
app.jinja_env.filters['first_name'] = first_name
|
import bluetooth
import lightblue
# we should know
target_name = "Galaxy S8"
# we don't know yet
obex_port = None
target_address = None
print("searching for nearby devices...")
nearby_devices = bluetooth.discover_devices()
for bdaddr in nearby_devices:
print(bluetooth.lookup_name( bdaddr ))
if target_name == bluetooth.lookup_name( bdaddr ):
print("found the target device!")
target_address = bdaddr
break
print("searching for the object push service...")
print('Port Service')
services = lightblue.findservices(target_address)
for service in services:
print(str(service[1]) + ' ' + str(service[2]))
|
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import shapefile as shp
from structlog import get_logger
from collections import namedtuple
from mappa import config
logger = get_logger()
# Palette selector for one legend entry: `palette` picks the colour ramp,
# `intensity` picks the step inside it.
NomConfig = namedtuple("NomConfig", ["intensity", "palette"])
# Legend text (as it appears in the shapefile data) -> colour config.
NOMENCLATURE = [
    ("Sin poblacion hablante de lengua indigena", NomConfig(intensity=0, palette=0)),
    ("Menor de 2,500", NomConfig(intensity=1, palette=0)),
    ("De 2,500 a 4,999", NomConfig(intensity=2, palette=0)),
    ("De 5,000 a 14,999", NomConfig(intensity=3, palette=0)),
    ("De 15,000 y mas", NomConfig(intensity=0, palette=0)),
    ("Menor de 2,500 y de 2,500 a 4,999", NomConfig(intensity=2, palette=1)),
    ("Menor de 2,500 y de 5,000 a 14,999", NomConfig(intensity=3, palette=1)),
    ("De 2,500 a 4,999 y de 5,000 a 14,999", NomConfig(intensity=4, palette=1)),
    ("De 5,000 a 14,999 y de 15,000 y mas", NomConfig(intensity=5, palette=1)),
]
# Number of colour steps per ramp; intensities above index into these.
PALETTE_STEPS = 6
PALETTE_PINK = sns.cubehelix_palette(PALETTE_STEPS)
PALETTE_NAVY = sns.light_palette("navy", PALETTE_STEPS)
PALETTES = [
    PALETTE_PINK,
    PALETTE_NAVY,
]
# Bounding box of the plotted shapes, in map units.
FigurePoints = namedtuple("FigurePoints", ["min_x", "max_x", "min_y", "max_y"])
def read_shape_file(*, file_path, encoding="ISO-8859-1"):
    """Open the shapefile at ``file_path`` and return a shp.Reader."""
    logger.info("shp.reading", file_path=file_path, encoding=encoding)
    return shp.Reader(file_path, encoding=encoding)
def zoom_plot(*, figure_points, padding=50000):
    """Set the current plot's x/y limits to a roughly square window
    around ``figure_points``, padded by ``padding`` map units.

    The longer side's extent is used for both axes, and the shorter axis
    is shifted by half the difference so the content stays centred.
    """
    fp = figure_points
    dx = fp.max_x - fp.min_x
    dy = fp.max_y - fp.min_y
    if dx > dy:
        # Wider than tall: span dx on both axes, re-centre y.
        adjustment = int(dx - dy) / 2
        left = fp.min_x - padding
        right = fp.min_x + padding + dx
        bottom = fp.min_y - padding - adjustment
        top = fp.min_y + padding + dx - adjustment
    else:
        # Taller than wide: span dy on both axes, re-centre x.
        adjustment = int(dy - dx) / 2
        left = fp.min_x - padding - adjustment
        right = fp.min_x + padding + dy - adjustment
        bottom = fp.min_y - padding
        top = fp.min_y + padding + dy
    logger.info("plot.zooming", left=left, right=right, bottom=bottom, top=top)
    plt.xlim((left, right))
    plt.ylim((bottom, top))
def split_points(points):
    """Split a sequence of (x, y, ...) points into parallel lists of the
    x and y coordinates."""
    xs = [pt[0] for pt in points]
    ys = [pt[1] for pt in points]
    return xs, ys
def get_color_from_legend(legend):
    """Map a nomenclature legend string to its palette colour.

    Unknown legends fall back to the neutral (intensity 0, palette 0)
    entry. The previous default of the bare int ``0`` crashed with
    AttributeError on ``.palette`` for any legend not in NOMENCLATURE.
    """
    nom = dict(NOMENCLATURE).get(legend, NomConfig(intensity=0, palette=0))
    return PALETTES[nom.palette][nom.intensity]
def plot_shape_file(*, shape_file, figure):
    """Draw every shape outline of ``shape_file`` onto ``figure`` and
    return the axes.

    Bug fix: the previous version discarded the ``figure`` argument and
    created a fresh figure, so callers that passed their own figure
    (see ``render``) drew on a different canvas than the one they held.
    """
    logger.info("plt.rendering", shape_file=shape_file.shapeName)
    ax1 = figure.add_subplot()
    # Remove axis values:
    if not config.DEBUG:
        ax1.get_xaxis().set_visible(False)
        ax1.get_yaxis().set_visible(False)
    # Plot all shapes as thin black outlines:
    for shape in shape_file.shapeRecords():
        x, y = split_points(shape.shape.points[:])
        ax1.plot(x, y, linewidth=0.2, color="k")
    return ax1
def highlight_dataframe_by_intensity(*, data_frame, shape_file, ax):
    """Colours data frame by intensity.
    Returns top, right, left, and bottom points by DataFrame.

    NOTE(review): the bounds are only initialised while ``i == 0``, and
    they are re-initialised at EVERY point of the first shape, so the
    first shape contributes only its last point to the bounding box.
    Also, an empty ``data_frame`` leaves min_x/max_x/min_y/max_y unbound
    and the return raises UnboundLocalError — confirm callers never pass
    an empty frame.
    """
    for i, (pk, legend) in enumerate(
        zip(data_frame.index.array, data_frame.DPHLIL_LEY)
    ):
        # pk is the row index, used to look up the matching shape.
        shape_ex = shape_file.shape(pk)
        x_lon, y_lat = [], []
        for x, y in shape_ex.points:
            if i == 0:
                min_x, max_x = x, x
                min_y, max_y = y, y
            # Calculate min/max points:
            if x < min_x:
                min_x = x
            if x > max_x:
                max_x = x
            if y < min_y:
                min_y = y
            if y > max_y:
                max_y = y
            x_lon.append(x)
            y_lat.append(y)
        # Fill the polygon with the legend's palette colour.
        color = get_color_from_legend(legend)
        ax.fill(x_lon, y_lat, color=color)
    return FigurePoints(min_x=min_x, max_x=max_x, min_y=min_y, max_y=max_y)
def transform_shape_file_to_data_frame(*, shape_file):
    """Convert the shapefile's attribute records to a pandas DataFrame,
    adding a ``coords`` column with each shape's point list."""
    logger.info("shp.dataframing", shape_file=shape_file.shapeName)
    # The first field is skipped (pyshp's DeletionFlag pseudo-field —
    # presumably; confirm against the shapefile schema).
    column_names = [r[0] for r in shape_file.fields][1:]
    records = shape_file.records()
    shape_points = [s.points for s in shape_file.shapes()]
    data_frame = pd.DataFrame(columns=column_names, data=records)
    data_frame = data_frame.assign(coords=shape_points)
    return data_frame
def get_data_frame_by_state(*, shape_file, state):
    """Return the rows of the shapefile whose ``EDO_LEY`` column equals
    ``state``.

    Uses a ``@``-reference in ``DataFrame.query`` so state names
    containing quote characters cannot break the expression (the old
    ``str.format`` interpolation could).
    """
    data_frame = transform_shape_file_to_data_frame(shape_file=shape_file)
    data_frame = data_frame.query("EDO_LEY == @state")
    return data_frame
def configure_plot(params=None, figsize=(10, 9)):
    """Configure global seaborn/matplotlib style for map rendering.

    ``params`` overrides the default ``sns.set`` keyword arguments.
    """
    # Local dict; note it shadows the imported `config` module inside
    # this function only.
    config = {
        "style": "whitegrid",
        "palette": "pastel",
        "color_codes": True,
    }
    if params is not None:
        config.update(params)
    logger.info("seaborn.configuring", **config)
    sns.set(**config)
    sns.set_style("whitegrid", {"axes.grid": False, "font.family": "DejaVu Sans",})
    sns.mpl.rc("figure", figsize=figsize)
    # Preview and activate the pink palette.
    sns.palplot(PALETTE_PINK)
    sns.set_palette(PALETTE_PINK)
def render(*, file_path, state):
    """Render one state's map from the shapefile at ``file_path`` and
    export it as ``<state>.png``."""
    state = config.normalize_state_name(state)
    configure_plot()
    shape_file = read_shape_file(file_path=file_path)
    data_frame = get_data_frame_by_state(shape_file=shape_file, state=state)
    figure = plt.figure()
    ax = plot_shape_file(shape_file=shape_file, figure=figure)
    # Colour the state's shapes and collect the bounding box for zooming.
    figure_points = highlight_dataframe_by_intensity(
        data_frame=data_frame, shape_file=shape_file, ax=ax
    )
    zoom_plot(figure_points=figure_points)
    export_plot(name=state)
def render_all(*, file_path):
    """Render every state listed in the config, one image per state."""
    for state in config.STATES:
        render(file_path=file_path, state=state)
def export_plot(*, name, file_format="png"):
    """Save the current matplotlib figure as ``<name>.<file_format>`` in
    the working directory."""
    file_name = "{}.{}".format(name, file_format)
    plt.savefig(fname=file_name, format=file_format)
    logger.info("image.saved", file_name=file_name)
|
from django.db import models
class EFilingSubmission(models.Model):
    """One package submitted to the eFiling service.

    NOTE(review): submission/transaction/package identifiers presumably
    mirror the external eFiling API's values — confirm against the
    integration code.
    """

    id = models.AutoField(
        auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
    )
    # Set once at insert time.
    created_date = models.DateTimeField(auto_now_add=True)
    # Maintained by application code (deliberately not auto_now).
    last_updated = models.DateTimeField(blank=True, null=True)
    submission_id = models.CharField(max_length=100, null=True)
    transaction_id = models.CharField(max_length=100, null=True)
    package_number = models.CharField(max_length=100, null=True)
    package_url = models.CharField(max_length=200, null=True)
    # SET_NULL keeps the submission row if the Application is deleted.
    application = models.ForeignKey(
        "Application",
        related_name="efiling_submission_application_id",
        on_delete=models.SET_NULL,
        blank=True,
        null=True,
    )
|
import sys
sys.path.insert(0, '..')
import numpy as np
import torch
import os
import psutil
import random
import pickle
from tqdm import tqdm
from collections import OrderedDict
from deepctr_torch.inputs import (DenseFeat, SparseFeat, VarLenSparseFeat,
get_feature_names)
from deepctr_torch.models.din import DIN
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
from sklearn.metrics import accuracy_score,precision_score,recall_score,roc_auc_score
class Identity(torch.nn.Module):
    """No-op module used to strip trained heads (dnn / dnn_linear) off a
    loaded model while keeping the attribute in place.

    Bug fix: ``forward`` previously ended with a bare ``return`` and so
    returned None, making any layer replaced by Identity silently emit
    None; it now returns its input unchanged.
    """

    def __init__(self):
        super(Identity, self).__init__()

    def forward(self, x):
        # Pass the input through untouched.
        return x
def show_memory_info(hint):
    """Print the current process's USS memory usage in MB, labelled with
    ``hint``."""
    process = psutil.Process(os.getpid())
    uss_mb = process.memory_full_info().uss / 1024. / 1024
    print('{} memory used: {} MB'.format(hint, uss_mb))
def get_xy_fd(flag_stage=1,group_index=0,group_num = 0 ):
    """Load the pickled ml-1M dataset and build DIN model inputs for one
    training stage.

    Parameters
    ----------
    flag_stage : int
        Which stage's example indices to use (1-4); stage 2 additionally
        selects ``group_index`` within the stage-2 groups.
    group_index : int
        Group to select when ``flag_stage == 2``.
    group_num : int
        Total number of groups; must be 10 or 100 (selects the pickle file).

    Returns
    -------
    ``(x, y, feature_columns, behavior_feature_list)`` for stages 1-3;
    stage 4 additionally returns ``all_mid`` (set of every clicked item)
    and ``id_to_histmid_list`` (per-user clicked-item sets, 1-indexed).
    """
    # feature_columns = [SparseFeat('user', 3, embedding_dim=8), SparseFeat('gender', 2, embedding_dim=8),
    #                    SparseFeat('item', 3 + 1, embedding_dim=8), SparseFeat('item_gender', 2 + 1, embedding_dim=8),
    #                    DenseFeat('score', 1)]
    # feature_columns += [VarLenSparseFeat(SparseFeat('hist_item', 3 + 1, embedding_dim=8), 4, length_name="seq_length"),
    #                     VarLenSparseFeat(SparseFeat('hist_item_gender', 2 + 1, embedding_dim=8), 4, length_name="seq_length")]
    # behavior_feature_list = ["item", "item_gender"]
    # uid = np.array([0, 1, 2])
    # ugender = np.array([0, 1, 0])
    # iid = np.array([1, 2, 3])  # 0 is mask value
    # igender = np.array([1, 2, 1])  # 0 is mask value
    # score = np.array([0.1, 0.2, 0.3])
    # hist_iid = np.array([[1, 2, 3, 0], [1, 2, 3, 0], [1, 2, 0, 0]])
    # hist_igender = np.array([[1, 1, 2, 0], [2, 1, 1, 0], [2, 1, 0, 0]])
    # behavior_length = np.array([3, 3, 2])
    import pickle
    # Pick the pre-built pickle matching the requested group count.
    if(group_num==10):
        #f2 = open('../ml-1M/temp_sample.pkl','rb')
        f2 = open('../ml-1M/temp_all_10groups.pkl','rb')
    elif(group_num == 100):
        f2 = open('../ml-1M/temp_all_100groups.pkl','rb')
    else:
        raise ValueError("Must input a group num either 10 or 100!")
    # Pickle layout: uid, mid, rating, hist_mid, seq_length, then the
    # four per-stage example-index lists.
    uid = pickle.load(f2)
    mid = pickle.load(f2)
    rating = pickle.load(f2)
    show_memory_info('before load hist_mid')
    hist_mid= pickle.load(f2)
    show_memory_info('after load hist_mid')
    seq_length = pickle.load(f2)
    stage1= pickle.load(f2)
    stage2= pickle.load(f2)
    stage3= pickle.load(f2)
    stage4= pickle.load(f2)
    f2.close()
    uset = set(uid)
    num_user = len(uset)
    # Longest click history found; used as the padding length below.
    pad_len = 1
    for i in range(len(hist_mid)):
        if pad_len < len(hist_mid[i]):
            pad_len = len(hist_mid[i])
    # Pad hist_mid into a len(hist_mid) x pad_len matrix.
    feature_columns = [SparseFeat('user_id', num_user+1, embedding_dim=8), SparseFeat('mid',max(mid)+1
                       , embedding_dim=8)]
    feature_columns += [VarLenSparseFeat(SparseFeat('hist_mid', max(mid)+1, embedding_dim=8), 50, length_name="seq_length")]
    behavior_feature_list = ['mid']
    uid_s1 = []
    hist_mid_s1 = []
    mid_s1 = []
    seq_length_s1=[]
    rating_s1=[]
    # feature_dict = {'user': uid, 'gender': ugender, 'item': iid, 'item_gender': igender,
    #                 'hist_item': hist_iid, 'hist_item_gender': hist_igender, 'score': score,
    #                 "seq_length": behavior_length}
    # Re-point `stage1` at the requested stage's index list.
    if flag_stage==2:
        stage1 = stage2[group_index]
    if flag_stage==3:
        stage1 = stage3
    if flag_stage==4:
        stage1 = stage4
    # import pdb
    # pdb.set_trace()
    # Gather the selected examples by index.
    for i in range(0,len(stage1)):
        index = stage1[i]
        uid_s1.append(uid[index])
        mid_s1.append(mid[index])
        hist_mid_s1.append(hist_mid[index])
        seq_length_s1.append(len(hist_mid[index]))
        rating_s1.append(rating[index])
    hist_mid_s1 = pad_sequences(hist_mid_s1,maxlen = pad_len+1,padding = 'pre')
    uid_s1 = np.array(uid_s1)
    hist_mid_s1 = np.array(hist_mid_s1)
    seq_length_s1 = np.array(seq_length_s1)
    mid_s1 = np.array(mid_s1)
    rating_s1 = np.array(rating_s1)
    feature_dict = {'user_id':uid_s1,'hist_mid':hist_mid_s1,'seq_length':seq_length_s1,'mid':mid_s1}
    x = {name: feature_dict[name] for name in get_feature_names(feature_columns)}
    #y = np.array([1, 0, 1])
    # Keep only the 50 most recent history items and cap seq_length at 50.
    x['hist_mid'] = x['hist_mid'][:,-50:]
    x['seq_length'] = np.where(x['seq_length']>=50,50,x['seq_length'])
    y = rating_s1
    # The code below builds a list where list[i] is the set of all items
    # clicked by user i (a set per user).
    if(flag_stage==4):
        all_mid = set()
        id_to_histmid_list = ["padding"]  # indices start at 1
        for i in range(1,num_user+1):
            this_user_histmid = set(hist_mid_s1[i-1])  # no need to drop the 0 padding value
            id_to_histmid_list.append(this_user_histmid)
            all_mid |=this_user_histmid
        # f1 = open("id_to_histmid.pkl","wb")
        # pickle.dump(all_mid,f1);
        # pickle.dump(id_to_histmid_list,f1);
        # f1.close()
        #import gc
        #gc.collect()
    ######################################################end######################################################################
    if(flag_stage!=4):
        return x, y, feature_columns, behavior_feature_list
    else:
        return x, y, feature_columns, behavior_feature_list,all_mid,id_to_histmid_list
if __name__ == "__main__":
group_list = ["padding"] # 储存10个分组的id编号
model_list = ["padding"] # 储存10个阶段的模型
id_group_dict = dict()
add_att_constraint = True
#x, y, feature_columns, behavior_feature_list = get_xy_fd(flag_stage)
device = 'cpu'
use_cuda = True
if use_cuda and torch.cuda.is_available():
print('cuda ready...')
device = 'cuda:0'
Tmp = input("Press any key to start")
flag_stage = int(input("Please input the Stage Number:"))
group_num = int(input("Please input the Group number:"))
add_att_constraint = input("Do you want to add group attention constraint?[Y/N]").upper()
print("This training is on Stage"+str(flag_stage))
if(flag_stage==2):
print("There are {0} Groups in total".format(group_num))
for i in range(0,group_num):
print("Training on Stage2-Group"+str(i))
show_memory_info('before get data')
x, y, feature_columns, behavior_feature_list = get_xy_fd(flag_stage,i,group_num)
show_memory_info('aftergetdata')
model = DIN(feature_columns, behavior_feature_list, device=device, att_weight_normalization=True)
model.compile('adagrad', 'binary_crossentropy',
metrics=['acc'])
model.load_state_dict(torch.load("staged_models/model_10groups.pth"))
for param in model.parameters():
param.requires_grad = False
for param in model.attention.parameters():
param.requires_grad = True
dict_id = list(set(x['user_id']))
dict_group = [i]*len(dict_id)
id_group_dict.update(dict(zip(dict_id,dict_group)))
################################################store the id_to_group_dictionary
history = model.fit(x, y, batch_size=256, epochs=10, verbose=2, validation_split=0)
path = "staged_models/model_stage2_Group"+str(group_num)+"-" + str(i)+".pth"
torch.save(model.state_dict(), path)
print("Stage: "+str(flag_stage)+" Stored in : "+path)
print("\n")
f1 = open('staged_models/id_to_group{}.pkl'.format(group_num),'wb')
pickle.dump(id_group_dict,f1)
f1.close()
# import pdb
# pdb.set_trace()
    # Stage 1: train the base DIN model on the stage-1 split.
    if flag_stage == 1:
        x, y, feature_columns, behavior_feature_list = get_xy_fd(flag_stage,group_num=group_num)
        model = DIN(feature_columns, behavior_feature_list, device=device, att_weight_normalization=True)
        model.compile('adagrad', 'binary_crossentropy',
                      metrics=['auc'])
        history = model.fit(x, y, batch_size=256, epochs=10, verbose=2, validation_split=0)
        if(group_num==100):
            path = "staged_models/model_100groups.pth"
        if(group_num == 10):
            path = "staged_models/model_10groups.pth"
        #path = "staged_models/model_100groups.pth"
        torch.save(model.state_dict(), path)
        print("Stage: "+str(flag_stage)+" Stored in : "+path)
        print("\n")
    # Stage 3: train a fresh model, optionally constrained by the
    # per-group attention models produced in stage 2.
    if flag_stage == 3:
        x, y, feature_columns, behavior_feature_list = get_xy_fd(flag_stage,group_num=group_num)
        model = DIN(feature_columns, behavior_feature_list, device=device, att_weight_normalization=True)
        model.compile('adagrad','binary_crossentropy', metrics=['acc'])
        # load 10 models in stage2
        if add_att_constraint == 'Y':
            for i in range(group_num):
                tmp = DIN(feature_columns, behavior_feature_list, device=device, att_weight_normalization=True)
                model_list.append(tmp)
                path = "staged_models/model_stage2_Group"+str(group_num)+"-" + str(i)+".pth"
                model_list[i+1].load_state_dict(torch.load(path,map_location = device))
                # Strip the DNN heads: only the attention part of the
                # group models is needed for the constraint.
                model_list[i+1].dnn = Identity()
                model_list[i+1].dnn_linear = Identity()
        history = model.fit(x, y, batch_size = 512, epochs=10, verbose=2, validation_split=0.,model_list = model_list)
        if(add_att_constraint == 'Y'):
            store_path = "model3-Group"+str(group_num)+".pth"
        else:
            store_path = "model3-Group"+str(group_num)+"no_constraint.pth"
        torch.save(model.state_dict(),store_path )
    # Stage 4: evaluate the stage-3 model with leave-one-out ranking —
    # for every test example, rank the true item against 100 sampled
    # unclicked items and report hit@10-based recall/precision and AUC
    # per group.
    if flag_stage == 4:
        x, y, feature_columns, behavior_feature_list,all_mid,id_to_histmid_list = get_xy_fd(flag_stage,group_num=group_num)
        model = DIN(feature_columns, behavior_feature_list, device=device, att_weight_normalization=True)
        #feature_dict = {'user_id':uid_s1,'hist_mid':hist_mid_s1,'seq_length':seq_length_s1,'mid':mid_s1}
        feature_index = ["user_id","hist_mid","seq_length","mid"]
        if(add_att_constraint == 'Y'):
            path = "model3-Group"+str(group_num)+".pth"
        else:
            path = "model3-Group"+str(group_num)+"no_constraint.pth"
        model.load_state_dict(torch.load(path))
        # user-id -> group index mapping saved during stage 2.
        f2 = open('staged_models/id_to_group{}.pkl'.format(group_num),'rb')
        id_group_index = pickle.load(f2)
        f2.close()
        model.compile('adagrad','binary_crossentropy', metrics=['acc',])
        # this is part of the function evaluate
        pred_ans = model.predict(x, batch_size = 256)
        eval_result = {}
        #for name, metric_fun in model.metrics.items():
        #    eval_result[name] = metric_fun(y, pred_ans)
        #    eval_result["precison"] = precision_score(y,np.where(np.array(pred_ans)>0.5,1,0))
        #    eval_result["recall"] = recall_score(y,np.where(np.array(pred_ans)>0.5, 1, 0))
        #eval_result["hitrate"] = hitrate_score(y,pred_ans)
        # pred_by_group = [[]]*group_num  # arange the prediction by group
        # y_by_group = [[]]*group_num
        # for i in range(len(x['user_id'])):
        #     index = id_group_index[x['user_id'][i]]
        #     pred_by_group[index] = pred_by_group[index] + [pred_ans[i]]
        #     y_by_group[index] = y_by_group[index] + [y[i]]
        # auc_res = []
        # precision_res = []
        # recall_res = []
        # hit_res = []
        # for i in range(group_num):
        #     tmp_auc = metric_fun(y_by_group[i], pred_by_group[i])
        #tmp_precision = precision_score(y_by_group[i],np.where(np.array(pred_by_group[i])>0.5,1,0))
        #tmp_recall = recall_score(y_by_group[i], np.where(np.array(pred_by_group[i])>0.5,1,0))
        #     tmp_hitrate = hitrate(y_by_group[i], pred_by_group[i])
        #auc_res.append(tmp_auc)
        #precision_res.append(tmp_precision)
        #recall_res.append(tmp_recall)
        #     hitrate_res.append(tmp_hitrate)
        #auc_group_variance = np.var(auc_res)
        #precision_group_variance = np.var(precision_res)
        #recall_group_variance = np.var(recall_res)
        #hitrate_group_variance = np.var(hitrate_res)
        # epoch_logs = {}
        # eval_str = ""
        ######################Print eva###################
        # for name, result in eval_result.items():
        #     epoch_logs["val_" + name] = round(result,4)
        # for name in model.metrics:
        #     eval_str += " - " + "val_" + name + \
        #                 ": {0: .4f}".format(epoch_logs["val_" + name])
        # eval_str +=" - " + "acc_group_variance: {0: .4f}".format(auc_group_variance)
        #precision_res = [round(i,4) for i in precision_res]
        #auc_res = [round(i,4) for i in auc_res]
        # recall_res = [round(i,4) for i in recall_res]
        #hitrate_res = [round(i,4) for i in hitrate_res]
        print("================overall metrics=============")
        precision_count = []
        recall_count = []
        number_each_group = []
        hit = 0
        for i in range(group_num):  # group indices run 0..group_num-1
            precision_count.append(0)
            recall_count.append(0)
            number_each_group.append(0)
        # if isinstance(x, dict):
        #     x = [x[feature] for feature in feature_index]
        # for i in range(len(x)):
        #     if len(x[i].shape) == 1:
        #         x[i] = np.expand_dims(x[i], axis=1)
        # x = torch.from_numpy(np.concatenate(x, axis=-1))  # turn the input into a matrix so one user's data can be sliced out
        # x[i] is the i-th user (counting from 0)
        # x[i][0] user_id ; x[i][1:50]: hist_mid; x[i][51]: seq_length; x[i][-1]: target_mid
        auc_dict = {'pred':[],'actual':[]}
        auc_res = []
        # Ground-truth template per user: candidate 0 is the true item,
        # the other 100 are negatives.
        one_user_acutalres = [1]
        for i in range(100):
            one_user_acutalres.append(0)
        for i in range(group_num):
            auc_dict['pred'].append([])
            auc_dict['actual'].append([])
            auc_res.append(0)
        # NOTE(review): `i` below is the 1-based example counter, yet it
        # also indexes id_to_histmid_list and id_group_index — this only
        # lines up if examples are ordered one-per-user by user id;
        # confirm against the stage-4 split.
        for i in tqdm(range(1,len(y)+1)):
            x_thisuser = dict()
            # predict() does not shuffle the sample order
            x_thisuser['user_id'] = np.array(x['user_id'][i-1]).repeat(101,axis=0)
            x_thisuser['mid'] = np.array([x['mid'][i-1]]).repeat(101,axis=0)
            x_thisuser['hist_mid'] = np.array([x['hist_mid'][i-1]]).repeat(101,axis=0)
            x_thisuser['seq_length'] = np.array([x['seq_length'][i-1]]).repeat(101,axis=0)
            this_histmid = id_to_histmid_list[i]
            this_histmid.add(x['mid'][i-1].item())  # include the mid currently being predicted
            # Sample 100 negatives the user never clicked.
            not_click_mid = all_mid - this_histmid
            sample_list = random.sample(list(not_click_mid),100)
            x_thisuser['mid'][1:] = sample_list
            # 101 scores for the candidate list; index 0 is the true item
            pred_list= list(model.predict(x_thisuser,batch_size=128))  # predicted scores; [0] is the truly clicked mid
            auc_dict['pred'][id_group_index[i]].extend(pred_list)
            pred_actualmid = pred_list[0]
            pred_list.sort( reverse = True)
            # Rank of the true item among the 101 candidates.
            rank = pred_list.index(pred_actualmid)+1
            if(rank<=10):  # hit@10
                precision_count[id_group_index[i]]+=1  # precision + 1 (groups are 0-based)
                hit +=1
                #print("{0}users-hit:{1}".format(i,hit))
                recall_count[id_group_index[i]]+=1  # recall + 1
            number_each_group[id_group_index[i]]+=1  # count users per group
        recall_res = [round(recall_count[i]/number_each_group[i],4) for i in range(len(recall_count))]
        precision_res = [round(precision_count[i]/10/number_each_group[i],4) for i in range(len(precision_count))]
        for i in range(group_num):
            for j in range(number_each_group[i]):
                auc_dict['actual'][i].extend(one_user_acutalres)  # ground-truth labels for the test set
            auc_res[i] = roc_auc_score(auc_dict['actual'][i],auc_dict['pred'][i])
        if(add_att_constraint=='Y'):
            print("With Constraint:")
        else:
            print("Without Constraint:")
        print("===============================recall============================")
        print("{0} Groups recall result:{1}".format(group_num,recall_res))
        print("recall variance:{0}".format(round(np.var(recall_res),10)))
        print("===============================precision============================")
        print("{0} Groups precision result:{1}".format(group_num,precision_res))
        print("precision variance:{0}".format(round(np.var(precision_res),10)))
        print("===============================auc============================")
        print("{0} Groups auc result:{1}".format(group_num,auc_res))
        print("precision variance:{0}".format(round(np.var(auc_res),10)))
        import pdb
        pdb.set_trace()
        #print("=================recall==================")
        # print("auc_res:{0} - auc_group_var:{1:.4f}".format(auc_res,auc_group_variance))
        #print("=================recall==================")
        #print("recall_res:{0} - recall_group_var:{1:.4f}".format(recall_res,recall_group_variance))
        #print("===============precision=================")
        #print("precision_res:{0} - precision_group_var:{1:.4f}".format(precision_res,precision_group_variance))
        #print("===============hitrate=================")
        #print("hitrate_res:{0} - hitrate_group_var:{1}".format(hitrate_res,hitrate_group_variance))
        ######################## calculate the variance among the groups
"""
Created on Wed Feb 8 19:42:38 2017
@author: Gautham
"""
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style
style.use("ggplot")
out = "twitter-feed-textblob.txt"
data = open(out, "r").read()
lines = data.split('\n')
query = lines[0]
fig = plt.figure()
fig.canvas.set_window_title('Textblob Sentiment Analyzer')
fig.suptitle("Graphing Live Tweets of '{}'" .format(query) + " - Textblob Sentiment Analyzer")
ax = fig.add_subplot(1, 1, 1)
def animate(i):
    """FuncAnimation callback: re-read the sentiment feed and redraw.

    Line 0 of the feed is the query; each later line contains a
    classification label ("pos"/"neg") somewhere in its text.
    """
    with open("twitter-feed-textblob.txt", "r") as feed:
        data = feed.read()
    lines = data.split('\n')
    trend = []
    t = 0
    pos = 0
    neg = 0
    for line in lines[1:]:
        if "pos" in line:
            t += 1
            pos += 1
        elif "neg" in line:
            t -= 1
            neg += 1
        trend.append(t)
    ax.clear()
    ax.plot(trend)
    # Percentages use the same substring test as the trend above; the
    # previous lines.count("pos") only matched lines EXACTLY equal to
    # "pos" and therefore always reported 0%.
    pos_val = float(pos) / len(lines) * 100
    neg_val = float(neg) / len(lines) * 100
    ax.text(0.35, 0.9, "Pos: {:.4}%, Neg: {:.4}%" .format(pos_val, neg_val),
            color='red', transform=ax.transAxes)
#print (pos_val, neg_val)
# Redraw every 100 ms from the growing feed file.
ani = animation.FuncAnimation(fig, animate, interval=100)
plt.show()
|
# Generated by Django 2.1.4 on 2019-01-12 13:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds BlogPage.cover_invert_title."""

    dependencies = [
        ('home', '0013_auto_20190112_1153'),
    ]
    operations = [
        migrations.AddField(
            model_name='blogpage',
            name='cover_invert_title',
            # default=True backfills existing rows; preserve_default=False
            # drops the default from the field definition afterwards.
            field=models.BooleanField(blank=True, default=True),
            preserve_default=False,
        ),
    ]
|
from ..validators import must_not_be_none
from marshmallow import ValidationError
from unittest import mock, TestCase
class TestAgentMessage(TestCase):
    """Unit tests for the must_not_be_none validator."""

    def test_data_is_blank(self):
        """A non-None value (even an empty dict) must pass validation."""
        try:
            must_not_be_none({})
        # Narrowed from a bare `except:`, which also swallowed
        # SystemExit and KeyboardInterrupt.
        except Exception:
            self.fail("must_not_be_none() raised Exception unexpectedly")

    def test_data_is_not_blank(self):
        """None must be rejected with the expected message."""
        with self.assertRaises(ValidationError) as context:
            must_not_be_none(None)
        assert str(context.exception) == "Data not provided"
|
import os
import requests
import shutil
import gzip
import pprint
import json
pp = pprint.PrettyPrinter(indent=4)
# ENCODE portal root; API endpoints and file hrefs are relative to this.
__base_url__ = 'https://www.encodeproject.org/'
# Local root directory where downloaded datasets are stored.
__root_dir__ = '/media/dna/ENCODE_idr_20Jan_2017/'
# Only these IDR peak-call formats are downloaded.
ALLOWED_FILETYPES = ['bed narrowPeak', 'bed broadPeak']
def safe_makedir(dname):
    """Make a directory if it doesn't exist, handling concurrent race conditions.

    Credits: Brad Chapman for bcbio-nextgen: https://github.com/chapmanb/bcbio-nextgen/blob/master/bcbio/utils.py#L172

    Parameters
    ----------
    dname: str
        Path of directory to be created

    Returns
    -------
    str
        The path that was passed in (falsy values are returned as-is).
    """
    # Bug fix: `time` was used below without ever being imported, so the
    # retry path raised NameError instead of sleeping.
    import time

    if not dname:
        return dname
    num_tries = 0
    max_tries = 5
    while not os.path.exists(dname):
        # we could get an error here if multiple processes are creating
        # the directory at the same time. Grr, concurrency.
        try:
            os.makedirs(dname)
        except OSError:
            if num_tries > max_tries:
                raise
            num_tries += 1
            time.sleep(2)
    return dname
def fetch_idr_record(metadata):
    """Extract IDR peak-file download records from one file's metadata.

    Returns a list with zero or one record dict: only released files
    whose type is in ALLOWED_FILETYPES qualify.
    """
    #pp.pprint(metadata)
    # NOTE(review): 'files' is only printed when present and otherwise
    # unused — presumably debug leftover; confirm before removing.
    try:
        files = metadata['files']
        print(files)
    except KeyError:
        pass
    biosample_term_name = metadata['biosample_term_name']
    assay_term_name = metadata['assay_term_name']
    description = metadata['description']
    gene_name = metadata['gene_name']#['target']['label']
    parent_metadata = {'biosample_term_name': biosample_term_name,
                       'assay_term_name': assay_term_name,
                       'description': description,
                       'gene_name': gene_name}
    idr_records = []
    file_status = metadata['file_status']
    file_type = metadata['file_type']
    #output_type = metadata['output_type']
    if file_type in ALLOWED_FILETYPES and file_status == 'released':
        # '/experiments/<id>/' -> '<id>'
        dataset = metadata['dataset']
        dataset = dataset.replace('experiments','').replace('/','')
        href = metadata['href']
        title = metadata['peakfilename']
        assembly = metadata['assembly']
        idr_records.append({'href': href, 'metadata':metadata, 'parent_metadata': parent_metadata, 'dataset': dataset, 'peakfilename': title, 'assembly': assembly})
    return idr_records
def download_peakfile(source_url, filename, destination_dir):
    """Download a gzipped peak file from ENCODE and unpack it alongside."""
    gz_path = os.path.join(destination_dir, filename)
    plain_path = os.path.join(destination_dir, filename.replace('.gz', ''))

    response = requests.get(source_url, stream=True)
    with open(gz_path, 'wb') as gz_out:
        shutil.copyfileobj(response.raw, gz_out)

    # Decompress next to the .gz, dropping the extension.
    with gzip.open(gz_path, 'rb') as compressed:
        with open(plain_path, 'wb') as plain:
            plain.write(compressed.read())
    del response
def download_idr_tfs(root_dir, metadata):
    """Download the single IDR peak file described by *metadata* under *root_dir*."""
    idr_records = fetch_idr_record(metadata)
    # Exactly one IDR peak file is expected per sample.
    assert len(idr_records) == 1
    for record in idr_records:
        target_dir = os.path.join(root_dir, record['dataset'])
        safe_makedir(target_dir)
        peakfilename = record['peakfilename'] + '.bed.gz'
        download_peakfile(__base_url__ + record['href'], peakfilename, target_dir)
        # Persist the genome assembly next to the peaks for later lookup.
        with open(os.path.join(target_dir, record['peakfilename'] + '.assembly'), 'w') as fh:
            fh.write(record['assembly'])
        save_metadata_json(metadata, target_dir)
    return {'assembly': record['assembly'],
            'bedfile': os.path.join(target_dir, peakfilename.replace('.gz', ''))}
def get_experiment(experiment_id):
    """Fetch and return the JSON metadata for one ENCODE experiment accession."""
    url = "{}experiments/{}/?format=json".format(__base_url__, experiment_id)
    return requests.get(url).json()
def save_metadata_json(metadata, directory):
    """Serialize *metadata* to ``metadata.json`` inside *directory*.

    Bug fix: the original ignored *directory* entirely and wrote
    ``metadata.json`` into the current working directory, so every dataset
    overwrote the same file.
    """
    with open(os.path.join(directory, 'metadata.json'), 'w') as outfile:
        json.dump(metadata, outfile)
def filter_metadata(metadata, filter_dict=None):
    """Select released narrow/broad peak files from an experiment's metadata.

    Parameters
    ----------
    metadata : dict
        Full experiment metadata; must contain ``files`` and ``target``.
    filter_dict : dict, optional
        Single-entry mapping of a dotted key (one nesting level, e.g.
        ``'files.output_type'``) to the required value.  Defaults to
        selecting optimal IDR thresholded peaks.  A ``None`` default avoids
        the original mutable-default-argument pitfall.

    Returns
    -------
    list of dict
        One record per matching released peak file.
    """
    if filter_dict is None:
        filter_dict = {'files.output_type': 'optimal idr thresholded peaks'}
    # Bug fix: dict.keys()/.values() are views in Python 3 and cannot be
    # indexed ([0] raised TypeError); unpack the single expected item instead.
    (dotted_key, value), = filter_dict.items()
    filter_keys = dotted_key.split('.')
    files = metadata['files']
    biosample_term_name = metadata['biosample_term_name']
    assay_term_name = metadata['assay_term_name']
    # ``description`` is optional; the original used a bare except for this.
    description = metadata.get('description', '')
    gene_name = metadata['target']['label']
    filter_records = []
    for f in files:
        file_status = f['status']
        file_type = f['file_type']
        # Only the nested part of the dotted key is applied to each file
        # entry (KeyError if absent, matching the original behavior).
        filter_2 = f[filter_keys[1]]
        if filter_2 == value and file_type in ALLOWED_FILETYPES and file_status == 'released':
            dataset = f['dataset'].replace('experiments', '').replace('/', '')
            filter_records.append({'href': f['href'],
                                   'metadata': f,
                                   'parent_metadata': metadata,
                                   'dataset': dataset,
                                   'peakfilename': f['title'],
                                   'file_type': file_type,
                                   'file_status': file_status,
                                   'biosample_term_name': biosample_term_name,
                                   'assay_term_name': assay_term_name,
                                   'gene_name': gene_name,
                                   'description': description,
                                   'assembly': f['assembly']})
    return filter_records
def search_encode_tfs():
    """Walk every released TF ChIP-seq ENCODE experiment and download IDR peaks."""
    search_url = (
        __base_url__
        + '/search?type=Experiment&assay_title=ChIP-seq&limit=all'
          '&status=released&target.investigated_as=transcription+factor'
          '&format=json'
    )  # append '&frame=embedded' to inline full records
    graph = requests.get(search_url).json()['@graph']
    # Unique transcription-factor labels (computed for parity; not used below).
    all_tfs = {sample['target']['label'] for sample in graph}
    experiment_ids = [
        sample['@id'].strip().replace('/', '').replace('experiments', '')
        for sample in graph
    ]
    for experiment_id in experiment_ids:
        print(experiment_id)
        for record in filter_metadata(get_experiment(experiment_id)):
            download_idr_tfs(__root_dir__, record)


if __name__ == '__main__':
    search_encode_tfs()
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 9 14:29:16 2021
@author: Giray Balcı

Drive an LTspice simulation of a schematic and export the results to a PDF.
"""
# %%
# from fpdf import FPDF
import subprocess
import time
import os
# from datetime import datetime #for pdf autotime stamp

# Simulation inputs: schematic file plus the local LTspice XVII installation.
fileName = "exampleCircuit.asc"
dir_XVIIx64 = "C:\Program Files\LTC\LTspiceXVII\\"
exeName = "XVIIx64.exe"

# %%
from SpiceHandler import SpiceHandler

handler = SpiceHandler(dir_XVIIx64, exeName, fileName)
myList = handler.getComponentList()

# %%
# Inject .meas directives into a copy of the schematic and run it.
meas = handler.generateMeasCommand()
newName = handler.createNewSimFile(meas)
handler.runSimulation(newName)

# %%
results = handler.getSimulationResults()

# %%
from PDFReporter import PDFReporter

reporter = PDFReporter(newName, results, "12.07.2021")
reporter.export()

# DEBUG purposes
# automatically open, wait for visual inspection, close
DEBUG = True
if DEBUG:
    subprocess.Popen(['Tutorial.pdf'], shell=True)
    time.sleep(10)
    os.system("taskkill /im " + "AcroRd32.exe" + " /F")
|
# Shim so editable (`pip install -e .`) installs keep working; all project
# configuration lives in the declarative files (setup.cfg / pyproject.toml).
from setuptools import setup

setup()
|
from isbnlib import *
import easygui as eg
import sys
import csv
import os

OUTPUT_NAME = "YBP_HOLDINGS.txt"

# Clean up output from a previous run, if any.
# Bug fix: the original os.remove() crashed with FileNotFoundError on the
# very first run, when no previous output existed.
try:
    os.remove(OUTPUT_NAME)
except FileNotFoundError:
    pass

# Let the user pick the input spreadsheet.
csv_file_path = eg.fileopenbox(msg='Select .CSV file',
                               title='Select File',
                               filetypes='*.csv')
if csv_file_path is None:
    print("No file selected.")
    sys.exit()

# Scan each row and write the first valid ISBN per row, normalized to
# ISBN-13.  ``with`` guarantees both files are closed even on error (the
# original never closed the input file at all).
with open(csv_file_path, "r") as csv_file, open(OUTPUT_NAME, "w") as output_file:
    for row in csv.reader(csv_file, delimiter=';'):
        for isbn in row:
            isbn = isbn.replace(" ", "")
            if notisbn(isbn, level='strict'):
                continue  # invalid ISBN: try the next cell
            output_file.write(f"{to_isbn13(isbn)}\n")
            break  # only the first valid ISBN per row

eg.msgbox("Done.", "Finished")
# *******************************************************************************
# Copyright 2017 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
#
# @microservice: py-core-data-client library
# @author: Tyler Cox, Dell
# @version: 1.0.0
# *******************************************************************************
# NOTE(review): this block is a JUnit/Java integration test for the EdgeX
# core-data ValueDescriptor client that appears to have been only partially
# machine-translated toward Python.  It mixes Java syntax (braces, `import
# static`, `private`, generics, lambdas, `new`) with Python syntax (`def`,
# `from ... import`, `self.`) and is valid in neither language.  It needs a
# deliberate, full port before it can run; no token is changed here.
import static org.edgexfoundry.test.data.ValueDescriptorData.self.TEST_LABELS
import static org.edgexfoundry.test.data.ValueDescriptorData.self.TEST_NAME
import static org.edgexfoundry.test.data.ValueDescriptorData.self.TEST_UOMLABEL
import static org.edgexfoundry.test.data.ValueDescriptorData.checkTestData
import static org.junit.Assert.self.assertEqual
import static org.junit.Assert.self.assertNotNull
import static org.junit.Assert.self.assertTrue
import java.lang.reflect.Field
import java.util.List
import javax.ws.rs.NotFoundException
from controller import ValueDescriptorClient
from controller.impl import ValueDescriptorClientImpl
from domain.common import ValueDescriptor
from test.category import RequiresCoreDataRunning
from test.category import RequiresMongoDB
from test.data import DeviceData
from test.data import ValueDescriptorData
import org.junit.After
import org.junit.Before
import org.junit.Test
import org.junit.experimental.categories.Category
@Category({RequiresMongoDB.class, RequiresCoreDataRunning.class})
class ValueDescriptorClientTest {
    private static final String ENDPT = "http://localhost:48080/api/v1/valuedescriptor"
    private ValueDescriptorClient client
    private String id
    # setup tests the add function
    @Before
    def setUp() throws Exception {
        client = new ValueDescriptorClientImpl()
        setURL()
        ValueDescriptor valueDescriptor = ValueDescriptorData.newTestInstance()
        id = client.add(valueDescriptor)
        self.assertNotNull(id, "Value Descriptor did not get created correctly")
    # NOTE(review): reflection is used to point the client at the test endpoint.
    private void setURL() throws Exception {
        Class<?> clientClass = client.getClass()
        Field temp = clientClass.getDeclaredField("url")
        temp.setAccessible(true)
        temp.set(client, ENDPT)
    # cleanup tests the delete function
    @After
    def cleanup():
        List<ValueDescriptor> valueDescriptors = client.valueDescriptors()
        valueDescriptors.forEach((valueDescriptor) -> client.delete(valueDescriptor.getId()))
    def testValueDescriptor():
        ValueDescriptor vd = client.valueDescriptor(id)
        checkTestData(vd, id)
    # NOTE(review): presumably expected to raise NotFoundException; the
    # original @Test(expected=...) annotation did not survive translation.
    def testValueDescriptorWithUnknownnId():
        client.valueDescriptor("nosuchid")
    def testValueDescriptors():
        List<ValueDescriptor> vds = client.valueDescriptors()
        self.assertEqual(1, len(vds), "Find all not returning a list with one value descriptor")
        checkTestData(vds.get(0), id)
    def testValueDescriptorForName():
        ValueDescriptor vd = client.valueDescriptorByName(self.TEST_NAME)
        checkTestData(vd, id)
    def testValueDescriptorForNameWithNoneMatching():
        client.valueDescriptorByName("badname")
    def testValueDescriptorsByLabel():
        List<ValueDescriptor> vds = client.valueDescriptorByLabel(self.TEST_LABELS[0])
        self.assertEqual(1, len(vds), "Find by label not returning a list with one value descriptor")
        checkTestData(vds.get(0), id)
    def testValueDescriptorsForDeviceByName():
        client.valueDescriptorsForDeviceByName(DeviceData.self.TEST_NAME)
    def testValueDescriptorsForDeviceById():
        client.valueDescriptorsForDeviceById("123")
    def testValueDescriptorsByLabelWithNoneMatching():
        List<ValueDescriptor> vds = client.valueDescriptorByLabel("badlabel")
        self.assertTrue(vds.isEmpty(), "ValueDescriptor found with bad label")
    def testValueDescriptorsByUoMLabel():
        List<ValueDescriptor> vds = client.valueDescriptorByUOMLabel(self.TEST_UOMLABEL)
        self.assertEqual(1, len(vds), "Find by UOM label not returning a list with one value descriptor")
        checkTestData(vds.get(0), id)
    def testValueDescriptorsByUOMLabelWithNoneMatching():
        List<ValueDescriptor> vds = client.valueDescriptorByUOMLabel("badlabel")
        self.assertTrue(vds.isEmpty(), "ValueDescriptor found with bad UOM label")
    # TODO - in the future have Metadata up and also test with devices
    # associated
    def valueDescriptorsForDeviceByName():
        client.valueDescriptorsForDeviceByName("unknowndevice")
    # TODO - in the future have Metadata up and also test with devices
    # associated
    def valueDescriptorsForDeviceById():
        client.valueDescriptorsForDeviceById("unknowndeviceid")
    def testDeleteWithNone():
        client.delete("badid")
    def testUpdate():
        ValueDescriptor vd = client.valueDescriptor(id)
        vd.setOrigin(12345)
        self.assertTrue(client.update(vd), "Update did not complete successfully")
        ValueDescriptor vd2 = client.valueDescriptor(id)
        self.assertEqual(12345, vd2.getOrigin(), "Update did not work correclty")
        self.assertNotNull(vd2.getModified(), "Modified date is None")
        self.assertNotNull(vd2.getCreated(), "Create date is None")
        self.assertTrue("Modified date and create date should be different after update",
            vd2.getModified() != vd2.getCreated())
    def testDeleteByName():
        self.assertTrue(client.deleteByName(self.TEST_NAME), "ValueDescriptor not deleted by name")
    def testDeleteByNameWithNone():
        client.deleteByName("badname")
}
|
import time
import traceback
import numpy as np
from math import log, ceil
from mfeshb.optimizer.base.mq_base_facade import mqBaseFacade
from mfeshb.optimizer.base.utils import sample_configurations
from openbox.utils.config_space import ConfigurationSpace
class Hyperband(mqBaseFacade):
    """ The implementation of Hyperband (HB).
    The paper can be found in http://www.jmlr.org/papers/volume18/16-558/16-558.pdf .
    """
    def __init__(self, objective_func,
                 config_space: ConfigurationSpace,
                 R,
                 eta=3,
                 num_iter=10000,
                 random_state=1,
                 method_id='Hyperband',
                 restart_needed=True,
                 time_limit_per_trial=600,
                 runtime_limit=None,
                 ip='',
                 port=13579,
                 authkey=b'abc',):
        """Configure the evaluation facade and precompute bracket constants."""
        max_queue_len = 1000  # conservative design
        super().__init__(objective_func, method_name=method_id,
                         restart_needed=restart_needed, time_limit_per_trial=time_limit_per_trial,
                         runtime_limit=runtime_limit,
                         max_queue_len=max_queue_len, ip=ip, port=port, authkey=authkey)
        self.seed = random_state
        self.config_space = config_space
        self.config_space.seed(self.seed)
        self.num_iter = num_iter
        self.R = R  # Maximum iterations per configuration
        self.eta = eta  # Define configuration downsampling rate (default = 3)
        self.logeta = lambda x: log(x) / log(self.eta)  # log base eta
        self.s_max = int(self.logeta(self.R))  # index of the largest bracket
        self.B = (self.s_max + 1) * self.R  # total budget for one iterate() call
        self.incumbent_configs = list()
        self.incumbent_perfs = list()

    # This function can be called multiple times
    def iterate(self, skip_last=0):
        """Run one full Hyperband round: every bracket s = s_max .. 0.

        skip_last: number of final successive-halving rungs to skip in each
        bracket (0 runs each bracket to completion).
        """
        for s in reversed(range(self.s_max + 1)):
            # Initial number of configurations
            n = int(ceil(self.B / self.R / (s + 1) * self.eta ** s))
            # Initial number of iterations per config
            r = self.R * self.eta ** (-s)

            # Choose next n configurations.
            T = self.choose_next(n)

            incumbent_loss = np.inf
            extra_info = None
            last_run_num = None
            initial_run = True
            for i in range((s + 1) - int(skip_last)):  # Changed from s + 1
                # Run each of the n configs for <iterations>
                # and keep best (n_configs / eta) configurations.
                n_configs = n * self.eta ** (-i)
                n_iteration = r * self.eta ** (i)
                n_iter = n_iteration
                if last_run_num is not None and not self.restart_needed:
                    # Resumable evaluations only need the iteration delta.
                    n_iter -= last_run_num
                last_run_num = n_iteration
                self.logger.info("%s: %d configurations x %d iterations each"
                                 % (self.method_name, int(n_configs), int(n_iteration)))
                ret_val, early_stops = self.run_in_parallel(T, n_iter, extra_info, initial_run)
                initial_run = False
                val_losses = [item['loss'] for item in ret_val]
                ref_list = [item['ref_id'] for item in ret_val]
                self.update_incumbent_before_reduce(T, val_losses, n_iteration)
                # select a number of best configurations for the next loop
                # filter out early stops, if any
                indices = np.argsort(val_losses)
                if len(T) == sum(early_stops):
                    # Every candidate early-stopped: abandon this bracket.
                    break
                if len(T) >= self.eta:
                    indices = [i for i in indices if not early_stops[i]]
                    T = [T[i] for i in indices]
                    extra_info = [ref_list[i] for i in indices]
                    reduced_num = int(n_configs / self.eta)
                    T = T[0:reduced_num]
                    extra_info = extra_info[0:reduced_num]
                else:
                    # Too few survivors to halve: keep only the single best.
                    T = [T[indices[0]]]
                    extra_info = [ref_list[indices[0]]]
                val_losses = [val_losses[i] for i in indices][0:len(T)]  # update: sorted
                incumbent_loss = val_losses[0]
            self.update_incumbent_after_reduce(T, incumbent_loss)

    def run(self, skip_last=0):
        """Repeat iterate() up to num_iter times, logging incumbents each round."""
        try:
            for iter in range(1, 1 + self.num_iter):
                self.logger.info('-' * 50)
                self.logger.info("%s algorithm: %d/%d iteration starts" % (self.method_name, iter, self.num_iter))
                start_time = time.time()
                self.iterate(skip_last=skip_last)
                time_elapsed = (time.time() - start_time) / 60
                self.logger.info("Iteration took %.2f min." % time_elapsed)
                self.save_intermediate_statistics()
                for i, obj in enumerate(self.incumbent_perfs):
                    self.logger.info(
                        '%d-th config: %s, obj: %f.' % (i + 1, str(self.incumbent_configs[i]), self.incumbent_perfs[i]))
        except Exception as e:
            # Surface the traceback both on stdout and in the log.
            print(e)
            print(traceback.format_exc())
            self.logger.error(traceback.format_exc())

    def choose_next(self, num_config):
        # Sample n configurations uniformly.
        return sample_configurations(self.config_space, num_config)

    def update_incumbent_before_reduce(self, T, val_losses, n_iteration):
        """Hook for subclasses; plain Hyperband records nothing here."""
        return

    def update_incumbent_after_reduce(self, T, incumbent_loss):
        """
        update: T is sorted
        """
        if not np.isnan(incumbent_loss):
            self.incumbent_configs.append(T[0])
            self.incumbent_perfs.append(incumbent_loss)

    def get_incumbent(self, num_inc=1):
        """Return the num_inc best (config, perf) pairs seen so far."""
        assert (len(self.incumbent_perfs) == len(self.incumbent_configs))
        indices = np.argsort(self.incumbent_perfs)
        configs = [self.incumbent_configs[i] for i in indices[0:num_inc]]
        perfs = [self.incumbent_perfs[i] for i in indices[0: num_inc]]
        return configs, perfs
|
from django.http import HttpResponse, Http404
from django.shortcuts import render_to_response, render, get_object_or_404
from models import ContentModel, ModelVersion
from datetime import datetime, date
import json
#--------------------------------------------------------------------------------------
# Query by label
#--------------------------------------------------------------------------------------
def model_by_label(content_model, model_version=None):
    """Look up a ContentModel (and optionally one of its versions) by label.

    Returns a list of ``values()`` dicts: ModelVersion rows when
    *model_version* is given, otherwise ContentModel rows.

    Performance fix: the original fetched every row of both tables and
    filtered in Python; the filtering is now pushed into the database.
    """
    if model_version is not None:
        this_cm = list(ContentModel.objects.filter(label=content_model)
                       .values('id', 'label'))
        return list(ModelVersion.objects.filter(content_model_id=this_cm[0]['id'],
                                                version=model_version)
                    .values('id', 'content_model_id', 'version'))
    else:
        return list(ContentModel.objects.filter(label=content_model)
                    .values('id', 'label'))
#--------------------------------------------------------------------------------------
# Expose all the available ContentModels
#--------------------------------------------------------------------------------------
def get_all_models(request, extension):
    """Expose every ContentModel in the format implied by *extension*."""
    return view_models(ContentModel.objects.all(), extension)
#--------------------------------------------------------------------------------------
# Expose a single ContentModel
#--------------------------------------------------------------------------------------
def get_model(request, content_model, extension):
    """Expose a single ContentModel, looked up by its label."""
    matches = model_by_label(content_model)
    queryset = ContentModel.objects.filter(pk=matches[0]['id'])
    if not queryset:
        raise Http404
    return view_models(queryset, extension)
#--------------------------------------------------------------------------------------
# Choose the appropriate format to expose based on the requested extension
#--------------------------------------------------------------------------------------
def view_models(contentmodels, extension):
    """Render *contentmodels* as JSON, Atom XML, Drupal XML, or HTML (default)."""
    renderers = {
        'json': as_json,
        'xml': as_atom,
        'drupal': fer_drupal,
    }
    return renderers.get(extension, as_html)(contentmodels)
#--------------------------------------------------------------------------------------
# Convert a set of ContentModel instances to JSON and send as an HttpResponse
#--------------------------------------------------------------------------------------
def as_json(contentmodels):
    """Serialize the models and wrap them in a JSON HttpResponse."""
    payload = json.dumps([cm.serialized() for cm in contentmodels])
    return HttpResponse(payload, mimetype='application/json')
#--------------------------------------------------------------------------------------
# Convert a set of ContentModel instances to HTML and send as an HttpResponse
#--------------------------------------------------------------------------------------
def as_html(contentmodels):
    """Render the models with the HTML template."""
    context = {'contentmodels': contentmodels}
    return render_to_response('contentmodels.html', context)
#--------------------------------------------------------------------------------------
# Convert a set of ContentModel instances to XML (Atom) and send as an HttpResponse
#--------------------------------------------------------------------------------------
def as_atom(contentmodels):
    """Render the models as an Atom feed (XML)."""
    context = {
        'feed': AtomFeed(contentmodels=contentmodels),
        'contentmodels': contentmodels,
    }
    return render_to_response('contentmodels.xml', context,
                              mimetype="application/xml")
#--------------------------------------------------------------------------------------
# Convert a set of ContentModel instances to XML (fer Drupal) and send as an HttpResponse
#--------------------------------------------------------------------------------------
def fer_drupal(contentmodels):
    """Render the models as Drupal-flavored XML."""
    context = {'contentmodels': contentmodels}
    return render_to_response('ferDrupal.xml', context,
                              mimetype="application/xml")
#--------------------------------------------------------------------------------------
# Class for generating an Atom Feed. Default values as shown, can be adjusted by
# keyword-args on creation.
#--------------------------------------------------------------------------------------
class AtomFeed(object):
    """Value object holding the attributes an Atom feed template renders."""
    # These are default values for feed attributes
    title = "Content Models"
    subtitle = "USGIN Content Models Atom Feed"
    url = "http://schemas.usgin.org/contentmodels.xml"
    id = "http://schemas.usgin.org/contentmodels.xml"
    # NOTE(review): evaluated once at import time; normally overwritten by
    # set_date() in __init__, but confirm the stale default is acceptable.
    date = datetime.now().isoformat()
    author_name = "Ryan Clark"
    author_email = "metadata@usgin.org"
    # NOTE(review): class-level queryset, shared by every instance that does
    # not pass contentmodels=... -- verify this sharing is intended.
    contentmodels = ContentModel.objects.all()

    # Constructor function. Map kwargs to this instance to overwrite defaults
    def __init__(self, **kwargs):
        # Loop through arguments passed in
        for arg in kwargs:
            # Assign them to this instance, overwriting default values
            setattr(self, arg, kwargs[arg])
        # Set date and id
        self.set_date()
        self.set_id_and_url()

    # Function to set the feed's updated date based on the ContentModels passed in
    def set_date(self):
        # Count the number of ContentModels that were passed in
        number_of_models = self.contentmodels.count()
        # There is more than one ContentModel
        if number_of_models > 1:
            # Sort the ContentModels by date
            sortable = list(self.contentmodels)
            sortable.sort(key=lambda cm: cm.date_updated())
            # Set the feed's date to the most recent ContentModel's updated date
            self.date = sortable[len(sortable) - 1].iso_date_updated()
        # There is one ContentModel
        elif number_of_models == 1:
            # This ContentModel's date is what we want
            self.date = self.contentmodels[0].iso_date_updated()

    # Function to set the feed's id and url
    def set_id_and_url(self):
        # Use the default values unless this is a Feed containing only one ContenModel
        if self.contentmodels.count() == 1:
            # Set the feed's id and url to that of the passed in ContentModel
            self.url = self.contentmodels[0].my_atom()
            self.id = self.contentmodels[0].my_atom()
#--------------------------------------------------------------------------------------
# Homepage
#--------------------------------------------------------------------------------------
def homepage(req):
    """Home page: show the three most recently updated content models."""
    def last_update(cm):
        # Fall back to an ancient date so never-updated models sort last.
        return cm.date_updated() or date(1900, 1, 1)

    recent_models = sorted(ContentModel.objects.all(),
                           key=last_update, reverse=True)
    return render_to_response('home.html', {'recent_models': recent_models[:3]})
#--------------------------------------------------------------------------------------
# Model view page
#--------------------------------------------------------------------------------------
def models(req):
    """Render the full list of content models."""
    context = {'contentmodels': ContentModel.objects.all()}
    return render_to_response('models.html', context)
#--------------------------------------------------------------------------------------
# Swagger API Documentation
#--------------------------------------------------------------------------------------
def swagger(request, path):
    """Serve one of the Swagger spec templates, filling in the current host."""
    base_url = "http://%s" % request.META.get("HTTP_HOST")
    spec = "api-docs.json" if path == "" else path
    context = {"host": base_url, "swagger": "%s/swagger" % base_url}
    return render(request, "swagger/%s" % spec, context,
                  content_type="application/json")
def swaggerui(request):
    """Render the Swagger UI page pointed at this host's spec endpoint."""
    spec_url = "http://%s/swagger" % request.META.get("HTTP_HOST")
    return render(request, "swagger/swagger.html", {"swagger": spec_url})
#--------------------------------------------------------------------------------------
# FeatureCatalogues
#--------------------------------------------------------------------------------------
def get_feature_catalog(request, content_model, model_version):
    """Render the feature catalogue XML for one specific model version."""
    matches = model_by_label(content_model, model_version)
    version = get_object_or_404(ModelVersion, pk=matches[0]['id'])
    return render(request, "featureCatalog.xml", {"version": version},
                  content_type="text/xml")
#--------------------------------------------------------------------------------------
# Tools page
#--------------------------------------------------------------------------------------
def tools(request):
    """Render the static tools page (no context needed)."""
    return render(request, "tools.html", dict())
import os, sys, qtmodern.styles, qtmodern.windows, warnings, logging
from PyQt5.QtWidgets import QApplication
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import Qt
from argparse import ArgumentParser
#
from howdy import resourceDir
from howdy.core import returnQAppWithFonts
from howdy.email import email_demo_gui
#
warnings.simplefilter( 'ignore' )
def main( ):
    """Parse command-line options and launch the Howdy email demo GUI."""
    parser = ArgumentParser( )
    parser.add_argument('--info', dest='do_info', action='store_true',
                        default = False, help = 'Run info mode if chosen.')
    parser.add_argument('--noverify', dest='do_verify', action='store_false',
                        default = True, help = 'Do not verify SSL transactions if chosen.')
    args = parser.parse_args( )
    if args.do_info:
        logging.getLogger( ).setLevel( logging.INFO )
    #
    app = returnQAppWithFonts( )
    app.setAttribute( Qt.AA_UseHighDpiPixmaps )
    app.setWindowIcon( QIcon( os.path.join(
        resourceDir, 'icons', 'howdy_email_demo_gui_SQUARE_VECTA.svg' ) ) )
    qtmodern.styles.dark( app )
    demo_widget = email_demo_gui.HowdyEmailDemoGUI( verify = args.do_verify )
    modern_window = qtmodern.windows.ModernWindow( demo_widget )
    modern_window.show( )
    result = app.exec_( )
|
"""Test the UniFi Protect lock platform."""
# pylint: disable=protected-access
from __future__ import annotations
from unittest.mock import AsyncMock, Mock
from pyunifiprotect.data import Doorlock, LockStatusType
from homeassistant.components.unifiprotect.const import DEFAULT_ATTRIBUTION
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_ENTITY_ID,
STATE_JAMMED,
STATE_LOCKED,
STATE_LOCKING,
STATE_UNAVAILABLE,
STATE_UNLOCKED,
STATE_UNLOCKING,
Platform,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from .utils import MockUFPFixture, assert_entity_counts, init_entry
async def test_lock_setup(
    hass: HomeAssistant,
    ufp: MockUFPFixture,
    doorlock: Doorlock,
    unadopted_doorlock: Doorlock,
):
    """Test lock entity setup."""
    await init_entry(hass, ufp, [doorlock, unadopted_doorlock])
    assert_entity_counts(hass, Platform.LOCK, 1, 1)

    entity_id = "lock.test_lock_lock"
    registry_entry = er.async_get(hass).async_get(entity_id)
    assert registry_entry
    assert registry_entry.unique_id == f"{doorlock.mac}_lock"

    state = hass.states.get(entity_id)
    assert state
    assert state.state == STATE_UNLOCKED
    assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
async def test_lock_locked(
    hass: HomeAssistant,
    ufp: MockUFPFixture,
    doorlock: Doorlock,
    unadopted_doorlock: Doorlock,
):
    """Test lock entity locked."""
    await init_entry(hass, ufp, [doorlock, unadopted_doorlock])
    assert_entity_counts(hass, Platform.LOCK, 1, 1)

    # Push a websocket update flipping the lock to CLOSED.
    updated = doorlock.copy()
    updated.lock_status = LockStatusType.CLOSED
    msg = Mock()
    msg.changed_data = {}
    msg.new_obj = updated
    ufp.api.bootstrap.doorlocks = {updated.id: updated}
    ufp.ws_msg(msg)
    await hass.async_block_till_done()

    state = hass.states.get("lock.test_lock_lock")
    assert state
    assert state.state == STATE_LOCKED
async def test_lock_unlocking(
    hass: HomeAssistant,
    ufp: MockUFPFixture,
    doorlock: Doorlock,
    unadopted_doorlock: Doorlock,
):
    """Test lock entity unlocking."""
    await init_entry(hass, ufp, [doorlock, unadopted_doorlock])
    assert_entity_counts(hass, Platform.LOCK, 1, 1)

    # Push a websocket update flipping the lock to OPENING.
    updated = doorlock.copy()
    updated.lock_status = LockStatusType.OPENING
    msg = Mock()
    msg.changed_data = {}
    msg.new_obj = updated
    ufp.api.bootstrap.doorlocks = {updated.id: updated}
    ufp.ws_msg(msg)
    await hass.async_block_till_done()

    state = hass.states.get("lock.test_lock_lock")
    assert state
    assert state.state == STATE_UNLOCKING
async def test_lock_locking(
    hass: HomeAssistant,
    ufp: MockUFPFixture,
    doorlock: Doorlock,
    unadopted_doorlock: Doorlock,
):
    """Test lock entity locking."""
    await init_entry(hass, ufp, [doorlock, unadopted_doorlock])
    assert_entity_counts(hass, Platform.LOCK, 1, 1)

    # Push a websocket update flipping the lock to CLOSING.
    updated = doorlock.copy()
    updated.lock_status = LockStatusType.CLOSING
    msg = Mock()
    msg.changed_data = {}
    msg.new_obj = updated
    ufp.api.bootstrap.doorlocks = {updated.id: updated}
    ufp.ws_msg(msg)
    await hass.async_block_till_done()

    state = hass.states.get("lock.test_lock_lock")
    assert state
    assert state.state == STATE_LOCKING
async def test_lock_jammed(
    hass: HomeAssistant,
    ufp: MockUFPFixture,
    doorlock: Doorlock,
    unadopted_doorlock: Doorlock,
):
    """Test lock entity jammed."""
    await init_entry(hass, ufp, [doorlock, unadopted_doorlock])
    assert_entity_counts(hass, Platform.LOCK, 1, 1)

    # Push a websocket update flipping the lock to JAMMED_WHILE_CLOSING.
    updated = doorlock.copy()
    updated.lock_status = LockStatusType.JAMMED_WHILE_CLOSING
    msg = Mock()
    msg.changed_data = {}
    msg.new_obj = updated
    ufp.api.bootstrap.doorlocks = {updated.id: updated}
    ufp.ws_msg(msg)
    await hass.async_block_till_done()

    state = hass.states.get("lock.test_lock_lock")
    assert state
    assert state.state == STATE_JAMMED
async def test_lock_unavailable(
    hass: HomeAssistant,
    ufp: MockUFPFixture,
    doorlock: Doorlock,
    unadopted_doorlock: Doorlock,
):
    """Test lock entity unavailable."""
    await init_entry(hass, ufp, [doorlock, unadopted_doorlock])
    assert_entity_counts(hass, Platform.LOCK, 1, 1)

    # An uncalibrated lock should surface as unavailable.
    updated = doorlock.copy()
    updated.lock_status = LockStatusType.NOT_CALIBRATED
    msg = Mock()
    msg.changed_data = {}
    msg.new_obj = updated
    ufp.api.bootstrap.doorlocks = {updated.id: updated}
    ufp.ws_msg(msg)
    await hass.async_block_till_done()

    state = hass.states.get("lock.test_lock_lock")
    assert state
    assert state.state == STATE_UNAVAILABLE
async def test_lock_do_lock(
    hass: HomeAssistant,
    ufp: MockUFPFixture,
    doorlock: Doorlock,
    unadopted_doorlock: Doorlock,
):
    """Test lock entity lock service."""
    await init_entry(hass, ufp, [doorlock, unadopted_doorlock])
    assert_entity_counts(hass, Platform.LOCK, 1, 1)

    # Stub the device call so the service invocation can be observed.
    doorlock.__fields__["close_lock"] = Mock()
    doorlock.close_lock = AsyncMock()

    await hass.services.async_call(
        "lock", "lock", {ATTR_ENTITY_ID: "lock.test_lock_lock"}, blocking=True
    )
    doorlock.close_lock.assert_called_once()
async def test_lock_do_unlock(
    hass: HomeAssistant,
    ufp: MockUFPFixture,
    doorlock: Doorlock,
    unadopted_doorlock: Doorlock,
):
    """Test lock entity unlock service."""
    await init_entry(hass, ufp, [doorlock, unadopted_doorlock])
    assert_entity_counts(hass, Platform.LOCK, 1, 1)

    # First close the lock via a websocket update so unlock is meaningful.
    updated = doorlock.copy()
    updated.lock_status = LockStatusType.CLOSED
    msg = Mock()
    msg.changed_data = {}
    msg.new_obj = updated
    ufp.api.bootstrap.doorlocks = {updated.id: updated}
    ufp.ws_msg(msg)
    await hass.async_block_till_done()

    # Stub the device call so the service invocation can be observed.
    updated.__fields__["open_lock"] = Mock()
    updated.open_lock = AsyncMock()

    await hass.services.async_call(
        "lock", "unlock", {ATTR_ENTITY_ID: "lock.test_lock_lock"}, blocking=True
    )
    updated.open_lock.assert_called_once()
|
from typing import Callable
from fastapi import FastAPI
from motor.motor_asyncio import AsyncIOMotorClient
from vacations.config import (
MONGODB_URI,
HISTORY_COLLECTION,
USERS_COLLECTION,
)
def start_app_handler(app: FastAPI) -> Callable:
    """Build the startup hook that wires the Mongo collections onto app.state."""
    async def startup() -> None:
        client = AsyncIOMotorClient(MONGODB_URI)
        app.state.motor = client
        database = client.get_default_database()
        app.state.history = database[HISTORY_COLLECTION]
        app.state.users = database[USERS_COLLECTION]
    return startup
def stop_app_handler(app: FastAPI) -> Callable:
    """Build the shutdown hook that closes the Mongo client."""
    async def shutdown() -> None:
        # Close the client created during startup.
        app.state.motor.close()
    return shutdown
|
from paropt_sdk.utils.auth import do_login_flow, make_authorizer, logout
from paropt_sdk.config import (check_logged_in, PAROPT_SERVICE_ADDRESS, CLIENT_ID)
from globus_sdk.base import BaseClient, slash_join
from mdf_toolbox import login, logout
from tempfile import mkstemp
import pickle as pkl
import pandas as pd
import requests
import codecs
import json
import os
import warnings
# ignore SSL warnings
# b/c server is currently using self signed cert, requests with arg valid=False raise a warning
# about the security of ignoring verifying the SSL cert
warnings.filterwarnings("ignore")
# Where cached Globus auth tokens are stored between sessions.
_token_dir = os.path.expanduser("~/.paropt/credentials")
class ParoptClient(BaseClient):
    """Main class for interacting with the paropt service

    Holds helper operations for performing common tasks with the paropt service.
    """

    def __init__(self, authorizer=None, http_timeout=None,
                 force_login=False, **kwargs):
        """Initialize the client

        Args:
            authorizer (:class:`GlobusAuthorizer
             <globus_sdk.authorizers.base.GlobusAuthorizer>`):
                An authorizer instance used to communicate with paropt.
                If ``None``, one will be created via a login flow.
            http_timeout (int): Timeout for any call to service in seconds.
                (default is no timeout)
            force_login (bool): Whether to force a login to get new credentials.
                A login will always occur if ``authorizer`` is not provided.
        Keyword arguments are the same as for BaseClient.
        """
        # Bug fix: the original condition also tested an undefined name
        # (``search_client``), raising NameError whenever an authorizer was
        # supplied, and ``dlh_authorizer`` was unbound on that path.
        if force_login or authorizer is None:
            dlhub_scope = "https://auth.globus.org/scopes/81fc4156-a623-47f2-93ad-7184118226ba/auth"
            auth_res = login(services=[dlhub_scope],
                             app_name="paropt",
                             client_id=CLIENT_ID, clear_old_tokens=force_login,
                             token_dir=_token_dir)
            authorizer = auth_res['dlhub_org']
        super(ParoptClient, self).__init__("paropt",
                                           authorizer=authorizer,
                                           http_timeout=http_timeout,
                                           base_url=PAROPT_SERVICE_ADDRESS,
                                           **kwargs)

    def logout(self):
        """Remove credentials from your local system"""
        logout()

    def getOrCreateExperiment(self, experiment):
        """POST an experiment, returning the existing or newly-created record."""
        return self.post('/experiments',
                         json_body=experiment,
                         headers={'content-type': 'application/json'})

    def runTrial(self, experiment_id, optimizer):
        """Queue a new trial for *experiment_id* using *optimizer* config."""
        return self.post(f'/experiments/{experiment_id}/trials',
                         json_body=optimizer,
                         headers={'content-type': 'application/json'})

    def getTrials(self, experiment_id):
        """Fetch all trials recorded for an experiment."""
        return self.get(f'/experiments/{experiment_id}/trials')

    def getRunningExperiments(self):
        """Fetch jobs currently running."""
        return self.get('/jobs/running')

    def getFailedExperiments(self):
        """Fetch jobs that failed."""
        return self.get('/jobs/failed')

    def getQueuedExperiments(self):
        """Fetch jobs waiting in the queue."""
        return self.get('/jobs/queued')

    def getJob(self, job_id):
        """Fetch a single job by id."""
        return self.get(f'/jobs/{job_id}')

    def getExperimentJob(self, experiment_id):
        """Fetch the job associated with an experiment."""
        return self.get(f'/experiments/{experiment_id}/job')
import frappe
from frappe import _
def execute():
    """Patch: set the branded application name to OryxERP in System Settings."""
    frappe.reload_doctype("System Settings")
    system_settings = frappe.get_doc("System Settings")
    system_settings.db_set("app_name", "OryxERP", commit=True)
|
# ReadRecord.py
# --------------------------------------------------------------------------
#
# Written: minjie
# Date: May 2016
# A procedure which parses a ground motion record from the PEER
# strong motion database by finding dt in the record header, then
# echoing data values to the output file.
#
# Formal arguments
# inFilename -- file which contains PEER strong motion record
# outFilename -- file to be written in format G3 can read
# Return values
# dt -- time step determined from file header
# nPts -- number of data points from file header
#
# Assumptions
# The header in the PEER record is, e.g., formatted as 1 of following:
# 1) new PGA database
# PACIFIC ENGINEERING AND ANALYSIS STRONG-MOTION DATA
# IMPERIAL VALLEY 10/15/79 2319, EL CENTRO ARRAY 6, 230
# ACCELERATION TIME HISTORY IN UNITS OF G
# 3930 0.00500 NPTS, DT
# 2) old SMD database
# PACIFIC ENGINEERING AND ANALYSIS STRONG-MOTION DATA
# IMPERIAL VALLEY 10/15/79 2319, EL CENTRO ARRAY 6, 230
# ACCELERATION TIME HISTORY IN UNITS OF G
# NPTS= 3930, DT= .00500 SEC
def ReadRecord(inFilename, outFilename):
    """Parse a PEER strong-motion record, echoing data values to outFilename.

    Scans the header for the time step (DT) and number of points (NPTS) in
    either the old SMD format (``NPTS= 3930, DT= .00500 SEC``) or the new
    NGA format (``3930 0.00500 NPTS, DT``), then copies every subsequent
    line verbatim to the output file.

    Args:
        inFilename: file containing a PEER strong motion record
        outFilename: file to be written in a format G3 can read
    Returns:
        (dt, npts): time step and number of points from the header;
        (0.0, 0) if no recognizable header line was found.
    """
    dt = 0.0
    npts = 0
    # Use context managers so both files are closed even if parsing raises
    # (the original left them open on any exception).
    with open(inFilename, 'r') as inFileID, open(outFilename, 'w') as outFileID:
        # flag == 1 means dt has been found and all remaining lines are
        # ground-motion values -- ASSUMES dt is on the last header line.
        flag = 0
        for line in inFileID:
            if line == '\n':
                # Blank line --> do nothing
                continue
            elif flag == 1:
                # Echo ground motion values to output file
                outFileID.write(line)
            else:
                # Search header lines for dt
                words = line.split()
                if len(words) >= 4:
                    if words[0] == 'NPTS=':
                        # old SMD format: "NPTS= 3930, DT= .00500 SEC"
                        for word in words:
                            if word != '':
                                # flag set on the PREVIOUS token, so this
                                # token is the value following DT=/NPTS=.
                                if flag == 1:
                                    dt = float(word)
                                    break
                                if flag == 2:
                                    npts = int(word.strip(','))
                                    flag = 0
                                # Find the desired token and set the flag
                                if word == 'DT=' or word == 'dt':
                                    flag = 1
                                if word == 'NPTS=':
                                    flag = 2
                    elif words[-1] == 'DT':
                        # new NGA format: "3930 0.00500 NPTS, DT"
                        count = 0
                        for word in words:
                            if word != '':
                                if count == 0:
                                    npts = int(word)
                                elif count == 1:
                                    dt = float(word)
                                elif word == 'DT':
                                    flag = 1
                                    break
                                count += 1
    return dt, npts
|
import bs4
import os
import argparse
# Google Analytics tracking snippet (loads analytics.js and sends a pageview
# for property UA-77536074-1); injected verbatim into each processed page.
script = """
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-77536074-1', 'auto');
ga('send', 'pageview');
"""
# method from https://stackoverflow.com/questions/35355225/edit-and-create-html-file-using-python
# and also from https://stackoverflow.com/questions/19123245/inserting-into-a-html-file-using-python
def add_google_analytics(file_path):
    """Insert the Google Analytics snippet into an HTML file, in place.

    Parses the file with BeautifulSoup, appends a new ``<script>`` element
    at the end of the document, fills it with the module-level tracking
    code, and overwrites the original file.

    :param file_path: path of the HTML file to modify
    """
    with open(file_path) as in_file:
        txt = in_file.read()
    soup = bs4.BeautifulSoup(txt, "lxml")
    soup.append("\n")
    new_script = soup.new_tag("script")
    soup.append(new_script)
    last_script = soup.findAll("script")[-1]
    last_script.insert(0, script)
    with open(file_path, "w") as out_file:
        out_file.write(str(soup))
    file_name = file_path.split("/")[-1]
    # BUGFIX/portability: the original used a Python-2-only print statement;
    # print(<single string>) behaves identically on Python 2 and 3.
    print("\tadded Google Analytics script to " + str(file_name))
if __name__ == "__main__":
    # CLI entry point: inject the GA snippet into the given HTML file.
    cli = argparse.ArgumentParser()
    cli.add_argument('--file_path', type=str, default="")
    parsed = cli.parse_args()
    add_google_analytics(parsed.file_path)
|
#
# ovirt-engine-setup -- ovirt engine setup
#
# Copyright oVirt Authors
# SPDX-License-Identifier: Apache-2.0
#
#
import atexit
import gettext
import os
import random
import string
import tempfile
from otopi import constants as otopicons
from otopi import filetransaction
from otopi import util
from otopi import plugin
from ovirt_engine import configfile
from ovirt_engine import util as outil
from ovirt_engine_setup.engine import constants as oenginecons
from ovirt_engine_setup.engine_common import constants as oengcommcons
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup.grafana_dwh import constants as ogdwhcons
from ovirt_setup_lib import dialog
def _(m):
    """Return *m* translated via the 'ovirt-engine-dwh' gettext domain."""
    # dgettext(domain, message) -- positional form of the original
    # keyword call; identical behavior.
    return gettext.dgettext('ovirt-engine-dwh', m)
@util.export
class Plugin(plugin.PluginBase):
    """Grafana DWH setup plugin: SSO registration and configuration.

    Prompts for the Grafana admin password, registers an SSO client for
    Grafana with the engine (locally, or via a remote engine machine),
    writes the Grafana configuration file and the engine-side Grafana
    access configuration, and prints closing instructions.
    """

    def __init__(self, context):
        super(Plugin, self).__init__(context=context)
        # Parsed SSO client registration result (configfile.ConfigFile).
        self._sso_config = None
        # True -> run the SSO registration tool locally during STAGE_MISC.
        self._register_sso_client = False
        # Files written by this plugin; registered for uninstall.
        self._uninstall_files = []
        # True -> restart the remote engine at closeup so SSO works.
        self._restart_remote_engine = False

    @staticmethod
    def _generatePassword():
        """Return a random 22-character alphanumeric string (CSPRNG)."""
        return ''.join([
            random.SystemRandom().choice(
                string.ascii_letters +
                string.digits
            ) for i in range(22)
        ])

    @plugin.event(
        stage=plugin.Stages.STAGE_INIT,
    )
    def _init(self):
        """Seed environment defaults: no admin password, fresh secret key."""
        self.environment.setdefault(
            ogdwhcons.ConfigEnv.ADMIN_PASSWORD,
            None
        )
        self.environment.setdefault(
            ogdwhcons.ConfigEnv.CONF_SECRET_KEY,
            self._generatePassword()
        )

    @plugin.event(
        stage=plugin.Stages.STAGE_CUSTOMIZATION,
        condition=lambda self: self.environment[ogdwhcons.CoreEnv.ENABLE],
        before=(
            osetupcons.Stages.DIALOG_TITLES_S_MISC,
        ),
        after=(
            oengcommcons.Stages.NETWORK_OWNERS_CONFIG_CUSTOMIZED,
        ),
    )
    def _customization_url(self):
        """Compute the public Grafana URL from the configured FQDN and path."""
        self._grafana_url = 'https://{grafana_fqdn}{path}/'.format(
            grafana_fqdn=self.environment[
                ogdwhcons.ConfigEnv.GRAFANA_FQDN
            ],
            path=ogdwhcons.Const.GRAFANA_URI_PATH,
        )

    @plugin.event(
        stage=plugin.Stages.STAGE_CUSTOMIZATION,
        before=(
            osetupcons.Stages.DIALOG_TITLES_E_MISC,
        ),
        after=(
            osetupcons.Stages.DIALOG_TITLES_S_MISC,
        ),
        condition=lambda self: (
            self.environment[ogdwhcons.ConfigEnv.ADMIN_PASSWORD] is None and
            self.environment[ogdwhcons.CoreEnv.ENABLE] and
            self.environment[ogdwhcons.ConfigEnv.NEW_DATABASE]
        ),
    )
    def _customization_admin_password(self):
        """Ask for the initial Grafana admin password.

        Offers to reuse the engine admin password when one is present in
        the environment; otherwise prompts for a new password.
        """
        password = None
        if self.environment.get(oenginecons.ConfigEnv.ADMIN_PASSWORD):
            use_engine_admin_password = dialog.queryBoolean(
                dialog=self.dialog,
                name='GRAFANA_USE_ENGINE_ADMIN_PASSWORD',
                note=_(
                    'Use Engine admin password as initial Grafana admin '
                    'password (@VALUES@) [@DEFAULT@]: '
                ),
                prompt=True,
                default=True
            )
            if use_engine_admin_password:
                password = self.environment[
                    oenginecons.ConfigEnv.ADMIN_PASSWORD
                ]
        if password is None:
            password = dialog.queryPassword(
                dialog=self.dialog,
                logger=self.logger,
                env=self.environment,
                key=ogdwhcons.ConfigEnv.ADMIN_PASSWORD,
                note=_(
                    'Grafana admin password: '
                ),
            )
        self.environment[ogdwhcons.ConfigEnv.ADMIN_PASSWORD] = password

    def _get_sso_client_registration_cmd(self, tmpconf):
        """Build the shell command that registers Grafana as an SSO client.

        The tool writes the resulting client id/secret into *tmpconf*.
        """
        return (
            '/usr/bin/ovirt-register-sso-client-tool '
            '--callback-prefix-url='
            '{grafana_url} '
            '--client-ca-location={ca_pem} '
            '--client-id={client_id} '
            '--encrypted-userinfo=false '
            '--conf-file-name={tmpconf}'
        ).format(
            grafana_url=self._grafana_url,
            ca_pem=oenginecons.FileLocations.OVIRT_ENGINE_PKI_ENGINE_CA_CERT,
            client_id=ogdwhcons.Const.OVIRT_GRAFANA_SSO_CLIENT_ID,
            tmpconf=tmpconf,
        )

    def _process_sso_client_registration_result(self, tmpconf):
        """Parse the registration output and hide the secret from logs."""
        self._sso_config = configfile.ConfigFile([tmpconf])
        # Register the client secret with the log filter so it is never
        # written to setup logs.
        self.environment[
            otopicons.CoreEnv.LOG_FILTER
        ].append(
            self._sso_config.get(
                'SSO_CLIENT_SECRET'
            )
        )

    def _get_engine_access_config(self):
        """Return the engine-side config text pointing at this Grafana."""
        return (
            'ENGINE_GRAFANA_FQDN={fqdn}\n'
            'ENGINE_GRAFANA_BASE_URL='
            'https://${{ENGINE_GRAFANA_FQDN}}/{uri_path}/\n'
        ).format(
            fqdn=self.environment[ogdwhcons.ConfigEnv.GRAFANA_FQDN],
            uri_path=ogdwhcons.Const.GRAFANA_URI_PATH,
        )

    @plugin.event(
        stage=plugin.Stages.STAGE_CUSTOMIZATION,
        before=(
            osetupcons.Stages.DIALOG_TITLES_E_MISC,
        ),
        after=(
            osetupcons.Stages.DIALOG_TITLES_S_MISC,
        ),
        condition=lambda self: (
            self.environment[ogdwhcons.CoreEnv.ENABLE] and
            self.environment[ogdwhcons.ConfigEnv.NEW_DATABASE]
        ),
    )
    def _customization_sso(self):
        """Arrange SSO client registration.

        With a local engine, defer registration to STAGE_MISC; otherwise
        run the registration command on the remote engine machine and copy
        back the resulting configuration.
        """
        if self.environment[oenginecons.CoreEnv.ENABLE]:
            self._register_sso_client = True
        else:
            self._remote_engine = self.environment[
                osetupcons.CoreEnv.REMOTE_ENGINE
            ]
            fd, tmpconf = tempfile.mkstemp()
            # Best-effort cleanup of the temporary credentials file.
            atexit.register(os.unlink, tmpconf)
            cmd = self._get_sso_client_registration_cmd(tmpconf)
            self._remote_engine.execute_on_engine(
                cmd=cmd,
                timeout=120,
                text=_(
                    'Please run the following command on the engine machine '
                    '{engine_fqdn}:\n'
                    '{cmd}\n'
                ).format(
                    engine_fqdn=self.environment[
                        oenginecons.ConfigEnv.ENGINE_FQDN
                    ],
                    cmd=cmd,
                ),
            )
            res = self._remote_engine.copy_from_engine(
                file_name=tmpconf,
                dialog_name='PROMPT_GRAFANA_REMOTE_ENGINE_SSO',
            )
            self._restart_remote_engine = dialog.queryBoolean(
                dialog=self.dialog,
                name='GRAFANA_RESTART_REMOTE_ENGINE_FOR_SSO',
                note=_(
                    'The engine should be restarted for Single-Sign-On (SSO) '
                    'to work. Do this as part of Setup? If not, you will have '
                    'to do this later by yourself '
                    '(@VALUES@) [@DEFAULT@]: '
                ),
                prompt=True,
                default=True,
            )
            # copy_from_engine returns bytes; write the fetched content to
            # the local temp file before parsing it.
            with open(tmpconf, 'wb') as f:
                f.write(res)
            self._process_sso_client_registration_result(tmpconf)

    @plugin.event(
        stage=plugin.Stages.STAGE_MISC,
        after=(
            oengcommcons.Stages.DB_CREDENTIALS_WRITTEN,
        ),
        condition=lambda self: (
            self.environment[ogdwhcons.CoreEnv.ENABLE] and
            self.environment[ogdwhcons.ConfigEnv.NEW_DATABASE]
        ),
    )
    def _misc_grafana_config(self):
        """Register SSO locally if needed and write grafana.ini from template."""
        if self._register_sso_client:
            fd, tmpconf = tempfile.mkstemp()
            atexit.register(os.unlink, tmpconf)
            self.execute(
                self._get_sso_client_registration_cmd(
                    tmpconf
                ).split(' ')
            )
            self._process_sso_client_registration_result(tmpconf)
        self._uninstall_files = []
        self.environment[
            osetupcons.CoreEnv.REGISTER_UNINSTALL_GROUPS
        ].addFiles(
            group='ovirt_grafana_files',
            fileList=self._uninstall_files,
        )
        self.environment[otopicons.CoreEnv.MAIN_TRANSACTION].append(
            filetransaction.FileTransaction(
                name=(
                    ogdwhcons.FileLocations.
                    GRAFANA_CONFIG_FILE
                ),
                mode=0o640,
                owner='root',
                group='grafana',
                enforcePermissions=True,
                content=outil.processTemplate(
                    template=(
                        ogdwhcons.FileLocations.
                        GRAFANA_CONFIG_FILE_TEMPLATE
                    ),
                    subst={
                        '@ADMIN_PASSWORD@': self.environment[
                            ogdwhcons.ConfigEnv.ADMIN_PASSWORD
                        ],
                        '@PROVISIONING@': (
                            ogdwhcons.FileLocations.
                            GRAFANA_PROVISIONING_CONFIGURATION
                        ),
                        '@GRAFANA_PORT@': self.environment[
                            ogdwhcons.ConfigEnv.GRAFANA_PORT
                        ],
                        '@SECRET_KEY@': self.environment[
                            ogdwhcons.ConfigEnv.CONF_SECRET_KEY
                        ],
                        '@GRAFANA_STATE_DIR@': (
                            ogdwhcons.FileLocations.GRAFANA_STATE_DIR
                        ),
                        '@GRAFANA_DB@': (
                            ogdwhcons.FileLocations.GRAFANA_DB
                        ),
                        '@OVIRT_GRAFANA_SSO_CLIENT_ID@': self._sso_config.get(
                            'SSO_CLIENT_ID'
                        ),
                        '@OVIRT_GRAFANA_SSO_CLIENT_SECRET@': (
                            self._sso_config.get('SSO_CLIENT_SECRET')
                        ),
                        '@ENGINE_SSO_AUTH_URL@': (
                            'https://{fqdn}/ovirt-engine/sso'.format(
                                fqdn=self.environment[
                                    oenginecons.ConfigEnv.ENGINE_FQDN
                                ],
                            )
                        ),
                        '@ROOT_URL@': '%s' % self._grafana_url,
                        '@GRAFANA_TLS_CLIENT_CA@': (
                            oengcommcons.FileLocations.
                            OVIRT_ENGINE_PKI_APACHE_CA_CERT
                        ),
                    },
                ),
                modifiedList=self._uninstall_files,
            )
        )

    @plugin.event(
        stage=plugin.Stages.STAGE_MISC,
        after=(
            oengcommcons.Stages.DB_CREDENTIALS_WRITTEN,
        ),
        condition=lambda self: (
            self.environment[ogdwhcons.CoreEnv.ENABLE] and
            self.environment[oenginecons.CoreEnv.ENABLE]
        ),
    )
    def _misc_engine_grafana_access(self):
        """Write the engine-side Grafana access config (local engine case)."""
        self.environment[otopicons.CoreEnv.MAIN_TRANSACTION].append(
            filetransaction.FileTransaction(
                name=(
                    ogdwhcons.FileLocations.
                    OVIRT_ENGINE_SERVICE_CONFIG_GRAFANA
                ),
                mode=0o640,
                owner='root',
                group=self.environment[osetupcons.SystemEnv.GROUP_ENGINE],
                enforcePermissions=True,
                content=self._get_engine_access_config(),
                modifiedList=self._uninstall_files,
            )
        )

    @plugin.event(
        stage=plugin.Stages.STAGE_CLOSEUP,
        after=(
            osetupcons.Stages.DIALOG_TITLES_E_SUMMARY,
        ),
        condition=lambda self: (
            self.environment[ogdwhcons.CoreEnv.ENABLE] and
            self.environment[ogdwhcons.ConfigEnv.NEW_DATABASE] and
            not self.environment[oenginecons.CoreEnv.ENABLE]
        ),
    )
    def _closeup_engine_grafana_access(self):
        """Push the Grafana access config to a remote engine machine."""
        self._remote_engine.copy_to_engine(
            file_name=(
                ogdwhcons.FileLocations.
                OVIRT_ENGINE_SERVICE_CONFIG_GRAFANA
            ),
            content=self._get_engine_access_config(),
        )

    @plugin.event(
        stage=plugin.Stages.STAGE_CLOSEUP,
        before=(
            osetupcons.Stages.DIALOG_TITLES_E_SUMMARY,
        ),
        after=(
            osetupcons.Stages.DIALOG_TITLES_S_SUMMARY,
        ),
        condition=lambda self: (
            self.environment[ogdwhcons.CoreEnv.ENABLE] and
            not self.environment[
                osetupcons.CoreEnv.DEVELOPER_MODE
            ]
        ),
    )
    def _closeup_inform_UI(self):
        """Print the Grafana URL and restart the (remote) engine for SSO.

        If the user declined the automatic restart, print the restart
        command instead.
        """
        self.dialog.note(
            text=_(
                'Web access for grafana is enabled at:\n'
                '    {url}\n'
            ).format(
                url=self._grafana_url,
            )
        )
        cmd = 'systemctl restart ovirt-engine'
        cmd_msg = _(
            'Please run the following command on the engine machine '
            '{engine_fqdn}, for SSO to work:\n'
            '{cmd}\n'
        ).format(
            engine_fqdn=self.environment[
                oenginecons.ConfigEnv.ENGINE_FQDN
            ],
            cmd=cmd,
        )
        if self._restart_remote_engine:
            self._remote_engine.execute_on_engine(
                cmd=cmd,
                timeout=120,
                text=cmd_msg,
            )
        else:
            self.dialog.note(
                text=cmd_msg,
            )
# vim: expandtab tabstop=4 shiftwidth=4
|
"""Create your test for the user views here."""
import pytest
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.models import AnonymousUser
from django.contrib.messages.middleware import MessageMiddleware
from django.contrib.sessions.middleware import SessionMiddleware
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect
from django.test import RequestFactory
from django.urls import reverse
from pytest_mock import MockFixture
from onebarangay_psql.users.factories import UserFactory
from onebarangay_psql.users.forms import UserAdminChangeForm
from onebarangay_psql.users.models import User
from onebarangay_psql.users.views import (
UserRedirectView,
UserUpdateView,
user_detail_view,
)
pytestmark = pytest.mark.django_db
class TestUserUpdateView:
    """Test the user update view.
    TODO:
        extracting view initialization code as class-scoped fixture
        would be great if only pytest-django supported non-function-scoped
        fixture db access -- this is a work-in-progress for now:
        https://github.com/pytest-dev/pytest-django/pull/258
    """

    def dummy_get_response(self, request: HttpRequest) -> None:
        """Stand-in get_response callable.

        Args:
            request (HttpRequest): The request object.
        Returns:
            None: Returns None.
        """
        return None

    def test_get_success_url(self, user: User, rf: RequestFactory) -> None:
        """get_success_url should resolve to the user's detail page.

        Args:
            user (User): The user object.
            rf (RequestFactory): The request factory.
        """
        update_view = UserUpdateView()
        update_view.request = rf.get("/fake-url/")
        update_view.request.user = user
        assert update_view.get_success_url() == f"/users/{user.username}/"

    def test_get_object(self, user: User, rf: RequestFactory) -> None:
        """get_object should return the requesting user.

        Args:
            user (User): The user object.
            rf (RequestFactory): The request factory.
        """
        update_view = UserUpdateView()
        update_view.request = rf.get("/fake-url/")
        update_view.request.user = user
        assert update_view.get_object() == user

    def test_form_valid(
        self, user: User, rf: RequestFactory, mocker: MockFixture
    ) -> None:
        """form_valid should queue a success message on the request.

        Args:
            user (User): The user object.
            rf (RequestFactory): The request factory.
        """
        update_view = UserUpdateView()
        req = rf.get("/fake-url/")
        fake_get_response = mocker.MagicMock(return_value=HttpResponse())
        # Session/message middleware must process the request so messages
        # can be stored on it.
        SessionMiddleware(fake_get_response).process_request(req)
        MessageMiddleware(fake_get_response).process_request(req)
        req.user = user
        update_view.request = req
        # Initialize the form
        form = UserAdminChangeForm()
        form.cleaned_data = []
        update_view.form_valid(form)
        queued = [m.message for m in messages.get_messages(req)]
        assert queued == ["Information successfully updated"]
class TestUserRedirectView:
    """Test the user redirect view."""

    def test_get_redirect_url(self, user: User, rf: RequestFactory) -> None:
        """get_redirect_url should resolve to the user's detail page.

        Args:
            user:
            rf:
        """
        redirect_view = UserRedirectView()
        redirect_view.request = rf.get("/fake-url")
        redirect_view.request.user = user
        assert redirect_view.get_redirect_url() == f"/users/{user.username}/"
class TestUserDetailView:
    """Test user detail view."""

    def test_authenticated(self, user: User, rf: RequestFactory):
        """An authenticated user gets a 200 response."""
        req = rf.get("/fake-url/")
        req.user = UserFactory()
        resp = user_detail_view(req, username=user.username)
        assert resp.status_code == 200

    def test_not_authenticated(self, user: User, rf: RequestFactory):
        """An anonymous user is redirected to the login page."""
        req = rf.get("/fake-url/")
        req.user = AnonymousUser()
        resp = user_detail_view(req, username=user.username)
        expected_login = reverse(settings.LOGIN_URL)
        assert isinstance(resp, HttpResponseRedirect)
        assert resp.status_code == 302
        assert resp.url == f"{expected_login}?next=/fake-url/"
|
# Copyright (C) 2020 NumS Development Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable = wrong-import-position
# Set numpy to single thread.
# NOTE: these environment variables must be set BEFORE numpy (or anything
# importing it) is loaded -- hence the imports placed below them.
import os

os.environ["OPENBLAS_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"

from nums.api import init, read, write, delete, read_csv, from_modin
from nums.core.version import __version__

# NOTE(review): "numpy" is listed in __all__ but no name `numpy` is bound in
# this module as shown here, so `from nums import *` would fail -- confirm
# that `numpy` is defined elsewhere or remove it from __all__.
__all__ = ["numpy", "init", "read", "write", "delete", "read_csv", "from_modin"]
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Train the student model with multiple GPUs (Librispeech corpus)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os.path import join, isfile, abspath
import sys
import time
import tensorflow as tf
from setproctitle import setproctitle
import yaml
import shutil
sys.path.append(abspath('../../../'))
from experiments.librispeech.data.load_dataset_xe import Dataset
from utils.training.learning_rate_controller import Controller
from utils.training.plot import plot_loss
from utils.training.multi_gpu import average_gradients
from utils.directory import mkdir_join, mkdir
from utils.parameter import count_total_parameters
from models.ctc.student_ctc import StudentCTC
def do_train(model, params, gpu_indices):
    """Run CTC training.
    Args:
        model: the model to train
        params (dict): A dictionary of parameters
        gpu_indices (list): GPU indices
    """
    # Load dataset
    train_data = Dataset(
        model_path=join(params['teacher_model_path'],
                        'temp' + str(params['teacher_temperature'])),
        data_type='train',
        batch_size=params['batch_size'], max_epoch=params['num_epoch'],
        num_gpu=len(gpu_indices))
    dev_clean_data = Dataset(
        model_path=join(params['teacher_model_path'],
                        'temp' + str(params['teacher_temperature'])),
        data_type='dev_clean',
        batch_size=params['batch_size'], max_epoch=params['num_epoch'],
        num_gpu=len(gpu_indices))
    dev_other_data = Dataset(
        model_path=join(params['teacher_model_path'],
                        'temp' + str(params['teacher_temperature'])),
        data_type='dev_other',
        batch_size=params['batch_size'], max_epoch=params['num_epoch'],
        num_gpu=len(gpu_indices))

    # Tell TensorFlow that the model will be built into the default graph
    with tf.Graph().as_default(), tf.device('/cpu:0'):

        # Create a variable to track the global step
        global_step = tf.Variable(0, name='global_step', trainable=False)

        # Set optimizer
        learning_rate_pl = tf.placeholder(tf.float32, name='learning_rate')
        optimizer = model._set_optimizer(
            params['optimizer'], learning_rate_pl)

        # Calculate the gradients for each model tower
        total_grads_and_vars, total_losses = [], []
        all_devices = ['/gpu:%d' % i_gpu for i_gpu in range(len(gpu_indices))]
        # NOTE: /cpu:0 is prepared for evaluation
        with tf.variable_scope(tf.get_variable_scope()):
            for i_gpu in range(len(all_devices)):
                with tf.device(all_devices[i_gpu]):
                    with tf.name_scope('tower_gpu%d' % i_gpu) as scope:
                        # Define placeholders in each tower
                        model.create_placeholders_xe()

                        # Calculate the total loss for the current tower of the
                        # model. This function constructs the entire model but
                        # shares the variables across all towers.
                        tower_loss, tower_logits = model.compute_xe_loss(
                            model.inputs_pl_list[i_gpu],
                            model.labels_pl_list[i_gpu],
                            model.keep_prob_pl_list[i_gpu],
                            scope,
                            softmax_temperature=params['student_temperature'],
                            is_training=True)
                        tower_loss = tf.expand_dims(tower_loss, axis=0)
                        total_losses.append(tower_loss)

                        # Reuse variables for the next tower
                        tf.get_variable_scope().reuse_variables()

                        # Calculate the gradients for the batch of data on this
                        # tower
                        tower_grads_and_vars = optimizer.compute_gradients(
                            tower_loss)

                        # Gradient clipping
                        tower_grads_and_vars = model._clip_gradients(
                            tower_grads_and_vars)

                        # TODO: Optionally add gradient noise

                        # Keep track of the gradients across all towers
                        total_grads_and_vars.append(tower_grads_and_vars)

        # Aggregate losses, then calculate average loss
        total_losses = tf.concat(axis=0, values=total_losses)
        loss_op = tf.reduce_mean(total_losses, axis=0)

        # We must calculate the mean of each gradient. Note that this is the
        # synchronization point across all towers
        average_grads_and_vars = average_gradients(total_grads_and_vars)

        # Apply the gradients to adjust the shared variables.
        train_op = optimizer.apply_gradients(average_grads_and_vars,
                                             global_step=global_step)

        # Define learning rate controller
        lr_controller = Controller(
            learning_rate_init=params['learning_rate'],
            decay_start_epoch=params['decay_start_epoch'],
            decay_rate=params['decay_rate'],
            decay_patient_epoch=params['decay_patient_epoch'],
            lower_better=True)

        # Build the summary tensor based on the TensorFlow collection of
        # summaries
        summary_train = tf.summary.merge(model.summaries_train)
        summary_dev = tf.summary.merge(model.summaries_dev)

        # Add the variable initializer operation
        init_op = tf.global_variables_initializer()

        # Create a saver for writing training checkpoints
        saver = tf.train.Saver(max_to_keep=None)

        # Count total parameters
        parameters_dict, total_parameters = count_total_parameters(
            tf.trainable_variables())
        for parameter_name in sorted(parameters_dict.keys()):
            print("%s %d" % (parameter_name, parameters_dict[parameter_name]))
        print("Total %d variables, %s M parameters" %
              (len(parameters_dict.keys()),
               "{:,}".format(total_parameters / 1000000)))

        csv_steps, csv_loss_train, csv_loss_dev = [], [], []
        # Create a session for running operation on the graph
        # NOTE: Start running operations on the Graph. allow_soft_placement
        # must be set to True to build towers on GPU, as some of the ops do not
        # have GPU implementations.
        with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                              log_device_placement=False)) as sess:
            # Instantiate a SummaryWriter to output summaries and the graph
            summary_writer = tf.summary.FileWriter(
                model.save_path, sess.graph)

            # Initialize parameters
            sess.run(init_op)

            # Train model
            start_time_train = time.time()
            start_time_epoch = time.time()
            start_time_step = time.time()
            loss_dev_best = 10000
            not_improved_epoch = 0
            learning_rate = float(params['learning_rate'])
            for step, (data, is_new_epoch) in enumerate(train_data):
                # Create feed dictionary for next mini batch (train)
                inputs, labels = data
                feed_dict_train = {}
                for i_gpu in range(len(gpu_indices)):
                    feed_dict_train[model.inputs_pl_list[i_gpu]
                                    ] = inputs[i_gpu]
                    feed_dict_train[model.labels_pl_list[i_gpu]
                                    ] = labels[i_gpu]
                    feed_dict_train[model.keep_prob_pl_list[i_gpu]
                                    ] = 1 - float(params['dropout'])
                feed_dict_train[learning_rate_pl] = learning_rate

                # Update parameters
                sess.run(train_op, feed_dict=feed_dict_train)

                if (step + 1) % int(params['print_step'] / len(gpu_indices)) == 0:
                    # Create feed dictionary for next mini batch (dev)
                    if params['train_data_size'] in ['train100h', 'train460h']:
                        inputs, labels = dev_clean_data.next()[0]
                    else:
                        inputs, labels = dev_other_data.next()[0]
                    feed_dict_dev = {}
                    for i_gpu in range(len(gpu_indices)):
                        feed_dict_dev[model.inputs_pl_list[i_gpu]
                                      ] = inputs[i_gpu]
                        feed_dict_dev[model.labels_pl_list[i_gpu]
                                      ] = labels[i_gpu]
                        feed_dict_dev[model.keep_prob_pl_list[i_gpu]] = 1.0

                    # Compute loss
                    loss_train = sess.run(loss_op, feed_dict=feed_dict_train)
                    loss_dev = sess.run(loss_op, feed_dict=feed_dict_dev)
                    csv_steps.append(step)
                    csv_loss_train.append(loss_train)
                    csv_loss_dev.append(loss_dev)

                    # Change to evaluation mode
                    for i_gpu in range(len(gpu_indices)):
                        feed_dict_train[model.keep_prob_pl_list[i_gpu]] = 1.0

                    # Compute accuracy & update event files
                    summary_str_train = sess.run(
                        summary_train, feed_dict=feed_dict_train)
                    summary_str_dev = sess.run(
                        summary_dev, feed_dict=feed_dict_dev)
                    summary_writer.add_summary(summary_str_train, step + 1)
                    summary_writer.add_summary(summary_str_dev, step + 1)
                    summary_writer.flush()

                    duration_step = time.time() - start_time_step
                    print("Step %d (epoch: %.3f): loss = %.3f (%.3f) / lr = %.5f (%.3f min)" %
                          (step + 1, train_data.epoch_detail, loss_train, loss_dev,
                           learning_rate, duration_step / 60))
                    sys.stdout.flush()
                    start_time_step = time.time()

                # Save checkpoint and evaluate model per epoch
                if is_new_epoch:
                    duration_epoch = time.time() - start_time_epoch
                    print('-----EPOCH:%d (%.3f min)-----' %
                          (train_data.epoch, duration_epoch / 60))

                    # Save fugure of loss & ler
                    plot_loss(csv_loss_train, csv_loss_dev, csv_steps,
                              save_path=model.save_path)

                    # Save model (check point)
                    checkpoint_file = join(
                        model.save_path, 'model.ckpt')
                    save_path = saver.save(
                        sess, checkpoint_file, global_step=train_data.epoch)
                    print("Model saved in file: %s" % save_path)

                    if train_data.epoch >= params['eval_start_epoch']:
                        start_time_eval = time.time()
                        print('=== Dev Data Evaluation ===')
                        # dev-clean
                        loss_dev_clean_epoch = do_eval_loss(
                            session=sess,
                            loss_op=loss_op,
                            model=model,
                            dataset=dev_clean_data,
                            label_type=params['label_type'],
                            eval_batch_size=params['batch_size'])
                        print('  LOSS (clean): %f' % loss_dev_clean_epoch)

                        # dev-other
                        # loss_dev_other_epoch = do_eval_loss(
                        #     session=sess,
                        #     loss_op=loss_op,
                        #     model=model,
                        #     dataset=dev_other_data,
                        #     label_type=params['label_type'],
                        #     eval_batch_size=params['batch_size'])
                        # print('  LOSS (other): %f' % loss_dev_other_epoch)

                        if params['train_data_size'] in ['train100h', 'train460h']:
                            metric_epoch = loss_dev_clean_epoch
                        else:
                            # NOTE(review): loss_dev_other_epoch is assigned
                            # only in the commented-out block above, so this
                            # branch raises NameError for other train data
                            # sizes -- confirm whether the dev-other
                            # evaluation should be re-enabled.
                            metric_epoch = loss_dev_other_epoch

                        if metric_epoch < loss_dev_best:
                            loss_dev_best = metric_epoch
                            not_improved_epoch = 0
                            print('■■■ ↑Best Score (LOSS)↑ ■■■')
                        else:
                            not_improved_epoch += 1

                        duration_eval = time.time() - start_time_eval
                        print('Evaluation time: %.3f min' %
                              (duration_eval / 60))

                        # Early stopping
                        if not_improved_epoch == params['not_improved_patient_epoch']:
                            break

                        # Update learning rate
                        learning_rate = lr_controller.decay_lr(
                            learning_rate=learning_rate,
                            epoch=train_data.epoch,
                            value=metric_epoch)

                    start_time_step = time.time()
                    start_time_epoch = time.time()

            duration_train = time.time() - start_time_train
            print('Total time: %.3f hour' % (duration_train / 3600))

            # Training was finished correctly
            with open(join(model.save_path, 'complete.txt'), 'w') as f:
                f.write('')
def do_eval_loss(session, loss_op, model, dataset, label_type,
                 eval_batch_size=None,):
    """Compute the summed loss of *model* over one pass of *dataset*.

    Temporarily overrides the dataset's batch size when *eval_batch_size*
    is given, and restores the original value before returning.
    """
    original_batch_size = dataset.batch_size

    # Reset data counter
    dataset.reset()

    # Set batch size in the evaluation
    if eval_batch_size is not None:
        dataset.batch_size = eval_batch_size

    total_loss = 0
    for data, is_new_epoch in dataset:
        # Create feed dictionary for next mini batch
        inputs, labels = data
        feed_dict = {}
        for device_idx in range(dataset.num_gpu):
            feed_dict[model.inputs_pl_list[device_idx]] = inputs[device_idx]
            feed_dict[model.labels_pl_list[device_idx]] = labels[device_idx]
            feed_dict[model.keep_prob_pl_list[device_idx]] = 1.0

        total_loss += session.run(loss_op, feed_dict=feed_dict)
        if is_new_epoch:
            break

    # Register original batch size
    if eval_batch_size is not None:
        dataset.batch_size = original_batch_size

    return total_loss
def main(config_path, model_save_path, gpu_indices):
    """Configure the student CTC model and launch training.

    Args:
        config_path (str): path to the YAML configuration file
        model_save_path (str): root directory under which models are saved
        gpu_indices (list): GPU indices to train on
    """
    # Load a config file (.yml).
    with open(config_path, "r") as f:
        # BUGFIX: yaml.load() without an explicit Loader is deprecated and
        # unsafe (it can construct arbitrary Python objects); safe_load is
        # sufficient for a plain parameter mapping.
        config = yaml.safe_load(f)
        params = config['param']

    # Except for a blank class
    params['num_classes'] = 28

    # Model setting
    model = StudentCTC(
        encoder_type=params['encoder_type'],
        input_size=params['input_size'] *
        params['num_stack'] * params['splice'],
        splice=params['splice'],
        num_stack=params['num_stack'],
        num_classes=params['num_classes'],
        parameter_init=params['weight_init'],
        clip_grad_norm=params['clip_grad_norm'],
        weight_decay=params['weight_decay'])

    # Set process name
    setproctitle(
        'tf_libri_' + model.name + '_' + params['train_data_size'] + '_' + params['label_type'])

    # Encode the main hyperparameters into the model name so parallel runs
    # get distinct save directories.
    model.name += '_' + params['optimizer']
    model.name += '_lr' + str(params['learning_rate'])
    if params['dropout'] != 0:
        model.name += '_drop' + str(params['dropout'])
    if params['num_stack'] != 1:
        model.name += '_stack' + str(params['num_stack'])
    if params['weight_decay'] != 0:
        model.name += '_wd' + str(params['weight_decay'])
    if len(gpu_indices) >= 2:
        model.name += '_gpu' + str(len(gpu_indices))

    # Set save path
    model.save_path = mkdir_join(
        model_save_path, 'student_ctc', params['label_type'],
        params['train_data_size'], model.name)

    # Reset model directory: find the first suffix that is neither a
    # completed nor an in-progress training directory.
    model_index = 0
    new_model_path = model.save_path
    while True:
        if isfile(join(new_model_path, 'complete.txt')):
            # Training of the first model have been finished
            model_index += 1
            new_model_path = model.save_path + '_' + str(model_index)
        elif isfile(join(new_model_path, 'config.yml')):
            # Training of the first model have not been finished yet
            model_index += 1
            new_model_path = model.save_path + '_' + str(model_index)
        else:
            break
    model.save_path = mkdir(new_model_path)

    # Save config file
    shutil.copyfile(config_path, join(model.save_path, 'config.yml'))

    # Redirect all prints into the run's log file.
    sys.stdout = open(join(model.save_path, 'train.log'), 'w')
    # TODO(hirofumi): change to logger
    do_train(model=model, params=params, gpu_indices=gpu_indices)
if __name__ == '__main__':
    args = sys.argv
    if len(args) != 3 and len(args) != 4:
        raise ValueError
    # BUGFIX: the original accepted len(args) == 3 above but then always
    # indexed args[3], raising IndexError when the GPU list was omitted.
    # Default to a single GPU (index 0) in that case.
    if len(args) == 4:
        gpu_indices = list(map(int, args[3].split(',')))
    else:
        gpu_indices = [0]
    main(config_path=args[1], model_save_path=args[2],
         gpu_indices=gpu_indices)
|
import os
from typing import List
from ds_simple_db.core.entry import Entry
from ds_simple_db.core.filter import Filter
from ds_simple_db.serializers import SerializerFactory
class Storage:
    """
    A base class for all storage types that provide underlying structures to store data.

    Storage can be persistent (e.g., databases) or non-persistent (in-memory).
    In general, Storage subclasses can even represent a combination of multiple other
    Storage subclasses to provide a synchronized multi-storage architecture.
    """

    def insert(self, data_dict: dict) -> Entry:
        """
        Insert a value to the storage.

        This operation, depending on implementation, does not necessarily commit a
        change to the actual storage. Some storage types may use an internal cache
        to optimize write operations.

        :param data_dict: A dictionary containing data to insert
        :return: Inserted Entry
        """
        pass

    def columns(self) -> list:
        """
        Get all columns present in the storage.

        :return: A list of columns of the storage
        """
        pass

    def all_entries(self) -> List[Entry]:
        """
        Retrieve all entries in the storage.

        :return: A list of all entries in the storage
        """
        pass

    def filter(self, filter_obj: Filter) -> List[Entry]:
        """
        Retrieve the data from the storage using a given filter.

        An empty filter must return an empty list.

        :param filter_obj: A filter to apply to the storage entries
        :return: A list of filtered entries
        """
        pass

    def load_from_file(self, db_path):
        """
        Load (deserialize) a storage from a file.

        The type of serializer is inferred from the file extension using
        SerializerFactory. Note that this does not clear the storage, so you can
        load multiple files into the storage.

        :param db_path: Path to a file to deserialize data from
        :raises ValueError: If ``db_path`` does not exist
        """
        if not os.path.exists(db_path):
            raise ValueError(f"File `{db_path}` does not exist")
        with open(db_path, 'r') as fp:
            raw_data_str = fp.read()
        serializer = self._get_serializer_from_file_name(db_path)
        deserialized_data = serializer.entries_from_string(raw_data_str)
        for entry in deserialized_data:
            # Bug fix: insert() takes the data dictionary as a single positional
            # argument; the original `self.insert(**entry.as_dict())` unpacked it
            # into keyword arguments, raising TypeError for any entry whose keys
            # are not literally named 'data_dict'.
            self.insert(entry.as_dict())

    def save_to_file(self, db_path):
        """
        Save (serialize) the storage to a file.

        The type of serializer is inferred from the file extension using
        SerializerFactory.

        :param db_path: Path to a file to serialize data to
        """
        serializer = self._get_serializer_from_file_name(db_path)
        serialized_data = serializer.entries_to_string(self.all_entries())
        db_folder = os.path.dirname(db_path)
        # dirname() yields '' for a bare file name; os.makedirs('') raises,
        # so only create the target directory when there actually is one.
        # exist_ok avoids a race between the exists() check and creation.
        if db_folder:
            os.makedirs(db_folder, exist_ok=True)
        with open(db_path, 'w') as fp:
            fp.write(serialized_data)

    def _get_serializer_from_file_name(self, db_path):
        # Infer the serialization format from the file extension (e.g. ".json").
        db_format = db_path.split('.')[-1]
        return SerializerFactory.create(db_format)
|
from unittest.mock import patch
from unittest import mock, TestCase
from requests import Session
from dhf_wrapper.client import PaymentClient
from dhf_wrapper.entities.payment import PaymentDTO
class TestPaymentClient(TestCase):
    """Unit tests for PaymentClient, with requests.Session verbs patched out."""

    @staticmethod
    def _session_mock(verb, payload):
        # Build the object returned by the patched Session method: a context
        # manager whose entered value answers both `<verb>(...)` and `.json()`
        # with the canned payload.
        session = mock.MagicMock()
        session.__enter__.return_value = mock.MagicMock(
            json=lambda: payload, **{verb: mock.MagicMock(return_value=payload)}
        )
        return session

    @patch.object(Session, 'get')
    def test_positive_getting_payments(self, mock_get):
        expected = [
            {
                "data": [
                    {
                        "store": 60,
                        "amount": "2500000000",
                        "status": "Not_paid",
                        "comment": "Tips",
                        "type": 1,
                        "text": "Pay"
                    }
                ],
                "count": 0,
                "total": 0,
                "page": 0,
                "pageCount": 0
            }
        ]
        mock_get.return_value = self._session_mock('get', expected)
        client = PaymentClient('http://example.com', token='xxxxx')
        self.assertEqual(client.get_payments(), expected)

    @patch.object(Session, 'get')
    def test_positive_getting_payment(self, mock_get):
        expected = {
            "data": [
                {
                    "store": 60,
                    "amount": "2500000000",
                    "status": "Not_paid",
                    "comment": "Tips",
                    "type": 1,
                    "text": "Pay"
                }
            ],
            "count": 0,
            "total": 0,
            "page": 0,
            "pageCount": 0
        }
        mock_get.return_value = self._session_mock('get', expected)
        client = PaymentClient('http://example.com', token='xxxxx')
        self.assertEqual(client.get_payment(payment_id=1), expected)

    @patch.object(Session, 'post')
    def test_positive_create_payment(self, mock_post):
        expected = {"id": 1}
        mock_post.return_value = self._session_mock('post', expected)
        client = PaymentClient('http://example.com', token='xxxxx')
        dto = PaymentDTO(
            store=2,
            amount=1234,
            status='paid',
            comment="test",
            type=1,
            text="test",
        )
        self.assertEqual(client.create_payment(payment=dto), expected)

    @patch.object(Session, 'post')
    def test_negative_create_payment_without_params(self, mock_post):
        # create_payment() requires the `payment` argument.
        mock_post.return_value = self._session_mock('post', {"id": 1})
        client = PaymentClient('http://example.com', token='xxxxx')
        with self.assertRaises(TypeError):
            client.create_payment()
|
# -*- coding: utf-8 -*-
"""Test Workflow for playing with deployment options."""
import time
from dask.distributed import Client, progress
def set_value(value):
    """Identity helper: return *value* unchanged (used to seed the task graph)."""
    return value
def square(value):
    """Return *value* squared, after a fixed delay simulating expensive work."""
    time.sleep(2.0)
    result = value ** 2
    return result
def neg(value):
    """Return the arithmetic negation of *value*, after a simulated delay."""
    time.sleep(2.0)
    negated = -value
    return negated
def main():
    """Connect to a local dask scheduler and run a small map/reduce demo."""
    client = Client('localhost:8786')
    # Fan out: seed 100 values, square each, then negate each.
    seeded = client.map(set_value, range(100))
    squared = client.map(square, seeded)
    negated = client.map(neg, squared)
    # Reduce on the cluster and report progress before fetching the result.
    total = client.submit(sum, negated)
    print(progress(total))
    print(total.result())


if __name__ == '__main__':
    main()
|
##############################################################################
# Copyright (c) 2017 Rajesh Kudaka <4k.rajesh@gmail.com>
# Copyright (c) 2018 Intel Corporation.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
""" Handler for yardstick command 'report' """
from yardstick.benchmark.core import report
from yardstick.cmd.commands import change_osloobj_to_paras
from yardstick.common.utils import cliargs
class ReportCommands(object):  # pragma: no cover
    """Report commands.

    Set of commands to manage reports. Each command takes a task id and the
    name of the task's yaml file, parsed via the ``cliargs`` decorators.
    """

    @cliargs("task_id", type=str, help=" task id", nargs=1)
    @cliargs("yaml_name", type=str, help=" Yaml file Name", nargs=1)
    def do_generate(self, args):
        """Generate a standard report for the given task/yaml pair."""
        params = change_osloobj_to_paras(args)
        report.Report().generate(params)

    @cliargs("task_id", type=str, help=" task id", nargs=1)
    @cliargs("yaml_name", type=str, help=" Yaml file Name", nargs=1)
    def do_generate_nsb(self, args):
        """Generate a report rendered with the NSB template."""
        params = change_osloobj_to_paras(args)
        report.Report().generate_nsb(params)
|
from django.contrib.gis.db.models.fields import BaseSpatialField
from django.contrib.gis.measure import Distance
from django.db import NotSupportedError
from django.db.models.expressions import Expression
from django.db.models.lookups import Lookup, Transform
from django.db.models.sql.query import Query
from django.utils.regex_helper import _lazy_re_compile
class RasterBandTransform(Transform):
    """Transform selecting a single band of a raster lhs in a lookup.

    The band index itself is consumed by GISLookup.process_band_indices(),
    so the SQL produced here is simply the compiled lhs expression.
    """
    def as_sql(self, compiler, connection):
        return compiler.compile(self.lhs)
class GISLookup(Lookup):
    """Base class for the spatial lookups below.

    The rhs may be a plain value or a (value, *extra_params) tuple; the extra
    params carry a raster band index and/or lookup-specific arguments (e.g.
    the 'relate' pattern or a distance).
    """
    sql_template = None
    transform_func = None
    distance = False
    # Band indices extracted from the query arguments (None for plain
    # geometry lookups); set by process_band_indices().
    band_rhs = None
    band_lhs = None

    def __init__(self, lhs, rhs):
        # Split a tuple/list rhs into the value itself and any extra params.
        rhs, *self.rhs_params = rhs if isinstance(rhs, (list, tuple)) else [rhs]
        super().__init__(lhs, rhs)
        self.template_params = {}
        self.process_rhs_params()

    def process_rhs_params(self):
        # Validate the number of extra rhs params and extract band indices.
        # 'relate' legitimately carries one extra param (the pattern) on top
        # of a possible band index, hence the 2-vs-1 special case.
        if self.rhs_params:
            # Check if a band index was passed in the query argument.
            if len(self.rhs_params) == (2 if self.lookup_name == 'relate' else 1):
                self.process_band_indices()
            elif len(self.rhs_params) > 1:
                raise ValueError('Tuple too long for lookup %s.' % self.lookup_name)
        elif isinstance(self.lhs, RasterBandTransform):
            self.process_band_indices(only_lhs=True)

    def process_band_indices(self, only_lhs=False):
        """
        Extract the lhs band index from the band transform class and the rhs
        band index from the input tuple.
        """
        # PostGIS band indices are 1-based, so the band index needs to be
        # increased to be consistent with the GDALRaster band indices.
        if only_lhs:
            self.band_rhs = 1
            self.band_lhs = self.lhs.band_index + 1
            return
        if isinstance(self.lhs, RasterBandTransform):
            self.band_lhs = self.lhs.band_index + 1
        else:
            self.band_lhs = 1
        self.band_rhs, *self.rhs_params = self.rhs_params

    def get_db_prep_lookup(self, value, connection):
        # get_db_prep_lookup is called by process_rhs from super class
        return ('%s', [connection.ops.Adapter(value)])

    def process_rhs(self, compiler, connection):
        if isinstance(self.rhs, Query):
            # If rhs is some Query, don't touch it.
            return super().process_rhs(compiler, connection)
        if isinstance(self.rhs, Expression):
            self.rhs = self.rhs.resolve_expression(compiler.query)
        rhs, rhs_params = super().process_rhs(compiler, connection)
        # Wrap the rhs placeholder in the backend-specific geometry SQL.
        placeholder = connection.ops.get_geom_placeholder(self.lhs.output_field, self.rhs, compiler)
        return placeholder % rhs, rhs_params

    def get_rhs_op(self, connection, rhs):
        # Unlike BuiltinLookup, the GIS get_rhs_op() implementation should return
        # an object (SpatialOperator) with an as_sql() method to allow for more
        # complex computations (where the lhs part can be mixed in).
        return connection.ops.gis_operators[self.lookup_name]

    def as_sql(self, compiler, connection):
        # Final SQL generation is delegated to the backend's SpatialOperator.
        lhs_sql, lhs_params = self.process_lhs(compiler, connection)
        rhs_sql, rhs_params = self.process_rhs(compiler, connection)
        sql_params = (*lhs_params, *rhs_params)
        template_params = {'lhs': lhs_sql, 'rhs': rhs_sql, 'value': '%s', **self.template_params}
        rhs_op = self.get_rhs_op(connection, rhs_sql)
        return rhs_op.as_sql(connection, self, template_params, sql_params)
# ------------------
# Geometry operators
# ------------------
# Each lookup below only declares its lookup_name; the SQL is produced by
# GISLookup.as_sql() via the backend's gis_operators mapping.

@BaseSpatialField.register_lookup
class OverlapsLeftLookup(GISLookup):
    """
    The overlaps_left operator returns true if A's bounding box overlaps or is to the
    left of B's bounding box.
    """
    lookup_name = 'overlaps_left'


@BaseSpatialField.register_lookup
class OverlapsRightLookup(GISLookup):
    """
    The 'overlaps_right' operator returns true if A's bounding box overlaps or is to the
    right of B's bounding box.
    """
    lookup_name = 'overlaps_right'


@BaseSpatialField.register_lookup
class OverlapsBelowLookup(GISLookup):
    """
    The 'overlaps_below' operator returns true if A's bounding box overlaps or is below
    B's bounding box.
    """
    lookup_name = 'overlaps_below'


@BaseSpatialField.register_lookup
class OverlapsAboveLookup(GISLookup):
    """
    The 'overlaps_above' operator returns true if A's bounding box overlaps or is above
    B's bounding box.
    """
    lookup_name = 'overlaps_above'


@BaseSpatialField.register_lookup
class LeftLookup(GISLookup):
    """
    The 'left' operator returns true if A's bounding box is strictly to the left
    of B's bounding box.
    """
    lookup_name = 'left'


@BaseSpatialField.register_lookup
class RightLookup(GISLookup):
    """
    The 'right' operator returns true if A's bounding box is strictly to the right
    of B's bounding box.
    """
    lookup_name = 'right'


@BaseSpatialField.register_lookup
class StrictlyBelowLookup(GISLookup):
    """
    The 'strictly_below' operator returns true if A's bounding box is strictly below B's
    bounding box.
    """
    lookup_name = 'strictly_below'


@BaseSpatialField.register_lookup
class StrictlyAboveLookup(GISLookup):
    """
    The 'strictly_above' operator returns true if A's bounding box is strictly above B's
    bounding box.
    """
    lookup_name = 'strictly_above'


@BaseSpatialField.register_lookup
class SameAsLookup(GISLookup):
    """
    The "~=" operator is the "same as" operator. It tests actual geometric
    equality of two features. So if A and B are the same feature,
    vertex-by-vertex, the operator returns true.
    """
    lookup_name = 'same_as'


# 'exact' is an alias for 'same_as' on spatial fields.
BaseSpatialField.register_lookup(SameAsLookup, 'exact')


@BaseSpatialField.register_lookup
class BBContainsLookup(GISLookup):
    """
    The 'bbcontains' operator returns true if A's bounding box completely contains
    B's bounding box.
    """
    lookup_name = 'bbcontains'


@BaseSpatialField.register_lookup
class BBOverlapsLookup(GISLookup):
    """
    The 'bboverlaps' operator returns true if A's bounding box overlaps B's bounding box.
    """
    lookup_name = 'bboverlaps'


@BaseSpatialField.register_lookup
class ContainedLookup(GISLookup):
    """
    The 'contained' operator returns true if A's bounding box is completely contained
    by B's bounding box.
    """
    lookup_name = 'contained'
# ------------------
# Geometry functions
# ------------------
# Function-style spatial predicates; each name maps to a backend function
# through connection.ops.gis_operators.

@BaseSpatialField.register_lookup
class ContainsLookup(GISLookup):
    lookup_name = 'contains'


@BaseSpatialField.register_lookup
class ContainsProperlyLookup(GISLookup):
    lookup_name = 'contains_properly'


@BaseSpatialField.register_lookup
class CoveredByLookup(GISLookup):
    lookup_name = 'coveredby'


@BaseSpatialField.register_lookup
class CoversLookup(GISLookup):
    lookup_name = 'covers'


@BaseSpatialField.register_lookup
class CrossesLookup(GISLookup):
    lookup_name = 'crosses'


@BaseSpatialField.register_lookup
class DisjointLookup(GISLookup):
    lookup_name = 'disjoint'


@BaseSpatialField.register_lookup
class EqualsLookup(GISLookup):
    lookup_name = 'equals'


@BaseSpatialField.register_lookup
class IntersectsLookup(GISLookup):
    lookup_name = 'intersects'


@BaseSpatialField.register_lookup
class OverlapsLookup(GISLookup):
    lookup_name = 'overlaps'


@BaseSpatialField.register_lookup
class RelateLookup(GISLookup):
    # Spatial relationship test against a 9-cell intersection matrix pattern,
    # e.g. filter(geom__relate=(other_geom, 'T*T***FF*')).
    lookup_name = 'relate'
    sql_template = '%(func)s(%(lhs)s, %(rhs)s, %%s)'
    # Nine cells, each one of 0/1/2/T/F/* .
    pattern_regex = _lazy_re_compile(r'^[012TF\*]{9}$')

    def process_rhs(self, compiler, connection):
        # Check the pattern argument
        pattern = self.rhs_params[0]
        backend_op = connection.ops.gis_operators[self.lookup_name]
        # Backends may validate the pattern themselves; otherwise fall back
        # to the generic regex check.
        if hasattr(backend_op, 'check_relate_argument'):
            backend_op.check_relate_argument(pattern)
        elif not isinstance(pattern, str) or not self.pattern_regex.match(pattern):
            raise ValueError('Invalid intersection matrix pattern "%s".' % pattern)
        sql, params = super().process_rhs(compiler, connection)
        # The pattern fills the trailing %s of sql_template.
        return sql, params + [pattern]


@BaseSpatialField.register_lookup
class TouchesLookup(GISLookup):
    lookup_name = 'touches'


@BaseSpatialField.register_lookup
class WithinLookup(GISLookup):
    lookup_name = 'within'
class DistanceLookupBase(GISLookup):
    """Base class for lookups comparing a distance between lhs and rhs.

    Note: self.rhs_params excludes the geometry itself, so its valid length
    here is 1-3 while the user-facing error message counts the full query
    tuple (geometry included), i.e. 2-4 elements.
    """
    distance = True
    sql_template = '%(func)s(%(lhs)s, %(rhs)s) %(op)s %(value)s'

    def process_rhs_params(self):
        if not 1 <= len(self.rhs_params) <= 3:
            raise ValueError("2, 3, or 4-element tuple required for '%s' lookup." % self.lookup_name)
        elif len(self.rhs_params) == 3 and self.rhs_params[2] != 'spheroid':
            raise ValueError("For 4-element tuples the last argument must be the 'spheroid' directive.")
        # Check if the second parameter is a band index.
        if len(self.rhs_params) > 1 and self.rhs_params[1] != 'spheroid':
            self.process_band_indices()

    def process_distance(self, compiler, connection):
        # The distance may itself be an expression (compiled as SQL) or a
        # plain value converted by the backend into database units.
        dist_param = self.rhs_params[0]
        return (
            compiler.compile(dist_param.resolve_expression(compiler.query))
            if hasattr(dist_param, 'resolve_expression') else
            ('%s', connection.ops.get_distance(self.lhs.output_field, self.rhs_params, self.lookup_name))
        )
@BaseSpatialField.register_lookup
class DWithinLookup(DistanceLookupBase):
    """Match rows whose geometry lies within the given distance of rhs."""
    lookup_name = 'dwithin'
    sql_template = '%(func)s(%(lhs)s, %(rhs)s, %(value)s)'

    def process_distance(self, compiler, connection):
        dist_param = self.rhs_params[0]
        # Some backends cannot take a distance *expression* (as opposed to a
        # Distance value) inside dwithin; raise instead of emitting bad SQL.
        if (
            not connection.features.supports_dwithin_distance_expr and
            hasattr(dist_param, 'resolve_expression') and
            not isinstance(dist_param, Distance)
        ):
            raise NotSupportedError(
                'This backend does not support expressions for specifying '
                'distance in the dwithin lookup.'
            )
        return super().process_distance(compiler, connection)

    def process_rhs(self, compiler, connection):
        # Inject the compiled distance SQL as the template's %(value)s and
        # append its parameters after the geometry's.
        dist_sql, dist_params = self.process_distance(compiler, connection)
        self.template_params['value'] = dist_sql
        rhs_sql, params = super().process_rhs(compiler, connection)
        return rhs_sql, params + dist_params
class DistanceLookupFromFunction(DistanceLookupBase):
    """Distance comparison compiled as ``<distance expr> <op> <distance>``."""
    def as_sql(self, compiler, connection):
        # A trailing 'spheroid' extra param requests spheroidal distance.
        spheroid = (len(self.rhs_params) == 2 and self.rhs_params[-1] == 'spheroid') or None
        distance_expr = connection.ops.distance_expr_for_lookup(self.lhs, self.rhs, spheroid=spheroid)
        sql, params = compiler.compile(distance_expr.resolve_expression(compiler.query))
        dist_sql, dist_params = self.process_distance(compiler, connection)
        return (
            '%(func)s %(op)s %(dist)s' % {'func': sql, 'op': self.op, 'dist': dist_sql},
            params + dist_params,
        )
# Concrete distance comparisons; each only supplies the SQL comparison op.

@BaseSpatialField.register_lookup
class DistanceGTLookup(DistanceLookupFromFunction):
    lookup_name = 'distance_gt'
    op = '>'


@BaseSpatialField.register_lookup
class DistanceGTELookup(DistanceLookupFromFunction):
    lookup_name = 'distance_gte'
    op = '>='


@BaseSpatialField.register_lookup
class DistanceLTLookup(DistanceLookupFromFunction):
    lookup_name = 'distance_lt'
    op = '<'


@BaseSpatialField.register_lookup
class DistanceLTELookup(DistanceLookupFromFunction):
    lookup_name = 'distance_lte'
    op = '<='
|
import pytest
import capybara
class TestHasAllOfSelectors:
    """Tests for ``has_all_of_selectors``: true only when *every* listed selector matches."""

    @pytest.fixture(autouse=True)
    def setup_session(self, session):
        # Every test starts from the static /with_html page.
        session.visit("/with_html")

    def test_is_true_if_the_given_selectors_are_on_the_page(self, session):
        assert session.has_all_of_selectors("css", "p a#foo", "h2#h2one", "h2#h2two") is True

    def test_is_false_if_any_of_the_given_selectors_are_not_on_the_page(self, session):
        # One missing selector ("p #afoo") is enough to fail the whole check.
        assert session.has_all_of_selectors("css", "p #afoo", "h2#h2three", "h2#h2one") is False

    def test_uses_default_selector(self, session):
        # NOTE(review): default_selector is not restored afterwards — this
        # presumably relies on a suite-wide fixture resetting capybara
        # configuration between tests; confirm.
        capybara.default_selector = "css"
        assert session.has_all_of_selectors("p a#foo", "h2#h2one", "h2#h2two")
        assert not session.has_all_of_selectors("p #afoo", "h2#h2three", "h2#h2one")

    def test_respects_scopes_when_used_with_a_context(self, session):
        # Inside a scope, relative XPath selectors only match within it.
        with session.scope("//p[@id='first']"):
            assert session.has_all_of_selectors(".//a[@id='foo']")
            assert not session.has_all_of_selectors(".//a[@id='red']")

    def test_respects_scopes_when_called_on_elements(self, session):
        el = session.find("//p[@id='first']")
        assert el.has_all_of_selectors(".//a[@id='foo']")
        assert not el.has_all_of_selectors(".//a[@id='red']")

    def test_applies_options_to_all_locators(self, session):
        # field_type applies to every locator, not just the first one.
        assert session.has_all_of_selectors(
            "field", "normal", "additional_newline", field_type="textarea")
        assert not session.has_all_of_selectors(
            "field", "normal", "test_field", "additional_newline", field_type="textarea")

    @pytest.mark.requires("js")
    def test_does_not_raise_error_if_all_the_elements_appear_before_given_wait_duration(self, session):
        with capybara.using_wait_time(0.1):
            session.visit("/with_js")
            session.click_link("Click me")
            # The explicit wait=5 overrides the short global wait time.
            assert session.has_all_of_selectors(
                "css", "a#clickable", "a#has-been-clicked", "#drag", wait=5)
|
class Reference:
    """A node of a doubly linked list: a datum plus links to its neighbors."""

    def __init__(self, data, last_ref, next_ref):
        # Bug fix: the original tuple assignment
        # `self.data, last_ref, next_ref = data, last_ref, next_ref`
        # only bound self.data and rebound the *local* names, so
        # self.last_ref / self.next_ref were never set and __str__ (and any
        # traversal) raised AttributeError.
        self.data, self.last_ref, self.next_ref = data, last_ref, next_ref

    def __str__(self):
        # Render as "prev_data, data, next_data" (None for missing neighbors).
        retstr = str([self.last_ref.data if self.last_ref else None, self.data, self.next_ref.data if self.next_ref else None])
        return retstr[1:-1]
class DoubleLinkedList:
    """A deque-like doubly linked list built from Reference nodes."""
    # head/end start as class-level None and become instance attributes on
    # the first append.
    head = None
    end = None

    def __str__(self):
        # Render like a Python list, e.g. "[1, 2, 3]".
        if len(self) < 1:
            return '[]'
        current_ref = self.head
        retstr = '['
        while current_ref:
            retstr = retstr + str(current_ref.data) + ', '
            current_ref = current_ref.next_ref
        retstr = retstr[:len(retstr)-2] + ']'
        return retstr

    def __len__(self):
        # O(n): walk the chain counting nodes.
        length = 0
        current_ref = self.head
        while current_ref:
            length += 1
            current_ref = current_ref.next_ref
        return length

    def _append(self, data, left=False):
        # Insert a new node at the head (left=True) or the tail (left=False).
        new_ref = Reference(data, None, None)
        if self.head is None:
            self.head = self.end = new_ref
        else:
            if left:
                new_ref.next_ref = self.head
                new_ref.last_ref = None
                self.head.last_ref = new_ref
                self.head = new_ref
            else:
                new_ref.last_ref = self.end
                new_ref.next_ref = None
                self.end.next_ref = new_ref
                self.end = new_ref

    def append(self, data):
        self._append(data)

    def appendleft(self, data):
        self._append(data, True)

    def _pop(self, left=False):
        # Remove and return the datum at one end (head if left, else tail).
        if len(self):
            ref = self.head if left else self.end
            self.remove(ref.data, left)
            return ref.data
        raise IndexError('pop from empty dequeue')

    def pop(self, i=True):
        # NOTE(review): the default i=True makes pop() pop from the *left*
        # (head), unlike collections.deque.pop() which pops from the right —
        # confirm this is intentional before relying on it.
        return self._pop(bool(i))

    def popleft(self):
        return self._pop(True)

    def remove(self, value, left=False):
        # Remove the first node equal to `value`, searching from the head
        # when left=True, otherwise from the tail backwards.
        current_ref = self.head if left else self.end
        if not len(self):
            raise IndexError('remove item from empty dequeue')
        while current_ref:
            if current_ref.data == value:
                if current_ref.last_ref and current_ref.next_ref:
                    # Interior node: splice neighbors together.
                    current_ref.last_ref.next_ref = current_ref.next_ref
                    current_ref.next_ref.last_ref = current_ref.last_ref
                else:
                    # NOTE(review): boundary updates key off the *search
                    # direction* (`left`), not off which end the node is at;
                    # removing the head via a right-to-left search (or the
                    # sole element) can leave head/end inconsistent — verify.
                    if left:
                        self.head = current_ref.next_ref
                        if current_ref.next_ref:
                            current_ref.next_ref.last_ref = None
                    else:
                        self.end = current_ref.last_ref
                        if current_ref.last_ref:
                            current_ref.last_ref.next_ref = None
                break
            else:
                current_ref = current_ref.next_ref if left else current_ref.last_ref

    def reveal(self):
        # Debug helper: print each node with its neighbors.
        current_ref = self.head
        while current_ref:
            print(current_ref)
            current_ref = current_ref.next_ref
class Queue:
    """A simple FIFO ('f') or LIFO ('l') queue with an optional max size.

    :param queue_type: any string starting with 'f' (FIFO) or 'l' (LIFO);
        NOTE: get() compares against the *exact* stored string, so pass
        'f'/'l' literally.
    :param maxsize: positive int to bound the queue; None/0 means unbounded.
    """

    def __init__(self, queue_type='f', maxsize=None):
        if queue_type.lower()[0] in 'lf':
            self.queue_type = queue_type
        else:
            raise TypeError('queue_type must be (l) or (f)')
        # Normalize None/falsy maxsize to 0 (= unbounded).
        if maxsize:
            self.maxsize = maxsize
        else:
            self.maxsize = 0
        self.queue = []

    def __len__(self):
        return len(self.queue)

    def put(self, item):
        """Append an item; raise ValueError when a bounded queue is full."""
        if self.maxsize > 0:
            if self.isfull():
                raise ValueError('Queue is full')
        self.queue.append(item)

    def get(self):
        """Remove and return the next item (front for FIFO, back for LIFO)."""
        if self.queue_type == 'f':
            return self.queue.pop(0)
        elif self.queue_type == 'l':
            # pop() without an index takes the last element.
            return self.queue.pop()
        else:
            raise TypeError('Type must be (l) or (f)')

    def empty(self):
        """Return True when the queue holds no items."""
        return not bool(self.queue)

    def isfull(self):
        """Return True when a bounded queue has reached its capacity.

        Bug fix: the original condition was inverted
        (`len(self) == self.maxsize if self.maxsize <= 0 else False`), so
        bounded queues never reported full (put() never enforced maxsize)
        and unbounded *empty* queues reported full.
        """
        return len(self) >= self.maxsize if self.maxsize > 0 else False
class DEQueue(Queue):
    """Double-ended-style queue; currently identical in behavior to Queue."""

    def __init__(self, queue_type='f', maxsize=None):
        super().__init__(queue_type, maxsize)
|
# Generated by Django 3.2.2 on 2021-05-31 16:49
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: creates the RealmUserDefault model, holding
    # per-realm default values for the display/notification settings listed
    # below. Generated code — avoid hand-editing field definitions.

    dependencies = [
        ("zerver", "0331_scheduledmessagenotificationemail"),
    ]

    operations = [
        migrations.CreateModel(
            name="RealmUserDefault",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
                    ),
                ),
                ("enter_sends", models.BooleanField(null=True, default=False)),
                ("left_side_userlist", models.BooleanField(default=False)),
                ("default_language", models.CharField(default="en", max_length=50)),
                ("default_view", models.TextField(default="recent_topics")),
                ("dense_mode", models.BooleanField(default=True)),
                ("fluid_layout_width", models.BooleanField(default=False)),
                ("high_contrast_mode", models.BooleanField(default=False)),
                ("translate_emoticons", models.BooleanField(default=False)),
                ("twenty_four_hour_time", models.BooleanField(default=False)),
                ("starred_message_counts", models.BooleanField(default=True)),
                ("color_scheme", models.PositiveSmallIntegerField(default=1)),
                ("demote_inactive_streams", models.PositiveSmallIntegerField(default=1)),
                (
                    "emojiset",
                    models.CharField(
                        choices=[
                            ("google", "Google modern"),
                            ("google-blob", "Google classic"),
                            ("twitter", "Twitter"),
                            ("text", "Plain text"),
                        ],
                        default="google-blob",
                        max_length=20,
                    ),
                ),
                ("enable_stream_desktop_notifications", models.BooleanField(default=False)),
                ("enable_stream_email_notifications", models.BooleanField(default=False)),
                ("enable_stream_push_notifications", models.BooleanField(default=False)),
                ("enable_stream_audible_notifications", models.BooleanField(default=False)),
                ("notification_sound", models.CharField(default="zulip", max_length=20)),
                ("wildcard_mentions_notify", models.BooleanField(default=True)),
                ("enable_desktop_notifications", models.BooleanField(default=True)),
                ("pm_content_in_desktop_notifications", models.BooleanField(default=True)),
                ("enable_sounds", models.BooleanField(default=True)),
                ("enable_offline_email_notifications", models.BooleanField(default=True)),
                ("message_content_in_email_notifications", models.BooleanField(default=True)),
                ("enable_offline_push_notifications", models.BooleanField(default=True)),
                ("enable_online_push_notifications", models.BooleanField(default=True)),
                ("desktop_icon_count_display", models.PositiveSmallIntegerField(default=1)),
                ("enable_digest_emails", models.BooleanField(default=True)),
                ("enable_login_emails", models.BooleanField(default=True)),
                ("enable_marketing_emails", models.BooleanField(default=True)),
                ("realm_name_in_notifications", models.BooleanField(default=False)),
                ("presence_enabled", models.BooleanField(default=True)),
                (
                    # Each row belongs to exactly one realm; deleted with it.
                    "realm",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE, to="zerver.realm"
                    ),
                ),
            ],
            options={
                "abstract": False,
            },
        ),
    ]
|
from django.core.exceptions import ImproperlyConfigured
from mock import patch
from pytest import raises
from task_api.utils import resolve_class, get_backend_cls
class Test(object):
    # Minimal dummy class; the tests below resolve it via its dotted path.
    pass


def test_resolve_class():
    # A valid "package.module.ClassName" path resolves to the class object.
    assert resolve_class('tests.test_utils.Test') == Test


def test_resolve_invalid_class():
    # Both an unknown class name and an unknown module resolve to None.
    assert resolve_class('tests.test_utils.Foo') is None
    assert resolve_class('tests.foobar.Test') is None


@patch('task_api.utils.TASK_API_BACKEND', 'tests.test_utils.Test')
def test_get_backend_cls():
    # The configured backend path resolves to its class.
    assert get_backend_cls() == Test


@patch('task_api.utils.TASK_API_BACKEND', 'tests.test_utils.Foo')
def test_get_invalid_backend_cls():
    # A backend path naming a missing class raises ImproperlyConfigured.
    with raises(ImproperlyConfigured):
        get_backend_cls()


@patch('task_api.utils.TASK_API_BACKEND', 'tests.foobar.Test')
def test_get_invalid_backend_cls_module():
    # Likewise for a missing module.
    with raises(ImproperlyConfigured):
        get_backend_cls()
|
# Standard Library
import datetime
import os
# Third Party Code
from dateutil.tz import tzutc
import responses
# Supercell Code
from supercell.breezometer.air_quality import (
air_quality_forecast_hourly,
current_air_quality,
historical_air_quality_hourly,
)
from supercell.breezometer.air_quality.models.air_quality_api_response import (
AirQualityAPIResponse,
)
from supercell.breezometer.air_quality.models.air_quality_collection_api_response import (
AirQualityCollectionAPIResponse,
)
@responses.activate
def test_current_air_quality():
    """current_air_quality() returns an AirQualityAPIResponse and builds the expected URL."""
    path = os.path.join(
        os.path.dirname(__file__), "..", "example_responses", "current-example-1.json"
    )
    # Read the canned response with a context manager so the file handle is
    # closed (the original `open(path, "r").read()` leaked it).
    with open(path, "r") as fixture:
        body = fixture.read()
    responses.add(
        responses.GET,
        "https://api.breezometer.com/air-quality/v2/current-conditions",
        body=body,
        adding_headers={"Content-Type": "application/json"},
        status=200,
    )
    assert isinstance(
        current_air_quality(
            latitude=39.3939, longitude=-109.10909, api_key="aaBBccDD", delay=0,
        ),
        AirQualityAPIResponse,
    )
    # Exactly one HTTP call, with the full feature list and metadata enabled.
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url == (
        "https://api.breezometer.com/air-quality/v2/current-conditions?"
        "features=breezometer_aqi,local_aqi,pollutants_concentrations,"
        "all_pollutants_concentrations,pollutants_aqi_information&"
        "key=aaBBccDD&lat=39.3939&lon=-109.10909&metadata=true"
    )
@responses.activate
def test_air_quality_forecast_hourly():
    """air_quality_forecast_hourly() returns a collection response and builds the expected URL."""
    path = os.path.join(
        os.path.dirname(__file__), "..", "example_responses", "forecast-example-1.json"
    )
    # Context manager avoids leaking the fixture file handle.
    with open(path, "r") as fixture:
        body = fixture.read()
    responses.add(
        responses.GET,
        "https://api.breezometer.com/air-quality/v2/forecast/hourly",
        body=body,
        adding_headers={"Content-Type": "application/json"},
        status=200,
    )
    assert isinstance(
        air_quality_forecast_hourly(
            latitude=39.3939,
            longitude=-109.10909,
            api_key="aaBBccDD",
            delay=0,
            hours=120,
        ),
        AirQualityCollectionAPIResponse,
    )
    # One call, with the hours parameter first and metadata disabled.
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url == (
        "https://api.breezometer.com/air-quality/v2/forecast/hourly?"
        "hours=120&key=aaBBccDD&lat=39.3939&lon=-109.10909&metadata=false"
    )
@responses.activate
def test_historical_air_quality_hourly():
    """historical_air_quality_hourly() returns an AirQualityAPIResponse and builds the expected URL."""
    path = os.path.join(
        os.path.dirname(__file__),
        "..",
        "example_responses",
        "historical-example-1.json",
    )
    # Context manager avoids leaking the fixture file handle.
    with open(path, "r") as fixture:
        body = fixture.read()
    responses.add(
        responses.GET,
        "https://api.breezometer.com/air-quality/v2/historical/hourly",
        body=body,
        adding_headers={"Content-Type": "application/json"},
        status=200,
    )
    assert isinstance(
        historical_air_quality_hourly(
            latitude=39.3939,
            longitude=-109.10909,
            api_key="aaBBccDD",
            delay=0,
            utc_datetime=datetime.datetime(2020, 8, 24, 18, 16, 22, tzinfo=tzutc()),
        ),
        AirQualityAPIResponse,
    )
    # One call, with the aware datetime serialized in ISO-8601 with offset.
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url == (
        "https://api.breezometer.com/air-quality/v2/historical/hourly?"
        "datetime=2020-08-24T18:16:22+00:00&features=breezometer_aqi&"
        "key=aaBBccDD&lat=39.3939&lon=-109.10909&metadata=true"
    )
|
import datetime
from typing import List, Optional, Set
import sqlalchemy
from telegram_logger.chat_log import ChatLog
from telegram_logger.log_entry import LogEntry
class Database:
    """SQLAlchemy-backed store for chat logs and their individual log entries."""

    def __init__(self, db_str: str) -> None:
        # db_str is an SQLAlchemy database URL (e.g. "sqlite:///telepisg.db").
        self.engine = sqlalchemy.create_engine(db_str)
        self.conn = self.engine.connect()
        self.metadata = sqlalchemy.MetaData()
        # One row per chat, remembering the newest message id already synced.
        self.chat_logs = sqlalchemy.Table(
            "telepisg_chat_logs",
            self.metadata,
            sqlalchemy.Column("chat_handle", sqlalchemy.String(), nullable=False, primary_key=True),
            sqlalchemy.Column("last_message_id", sqlalchemy.Integer())
        )
        # Individual log lines; (chat_handle, message_id, sub_message_id)
        # is unique, and entries are dropped with their chat (CASCADE).
        self.log_entries = sqlalchemy.Table(
            "telepisg_log_entries",
            self.metadata,
            sqlalchemy.Column(
                "chat_handle",
                sqlalchemy.String(),
                sqlalchemy.ForeignKey(
                    "telepisg_chat_logs.chat_handle",
                    ondelete="CASCADE"
                ),
                nullable=False
            ),
            sqlalchemy.Column("datetime", sqlalchemy.DateTime()),
            sqlalchemy.Column("entry_type", sqlalchemy.String()),
            sqlalchemy.Column("user_id", sqlalchemy.Integer()),
            sqlalchemy.Column("message_id", sqlalchemy.Integer()),
            sqlalchemy.Column("sub_message_id", sqlalchemy.Integer()),
            sqlalchemy.Column("text", sqlalchemy.Text()),
            sqlalchemy.UniqueConstraint("chat_handle", "message_id", "sub_message_id")
        )
        # Create both tables if they do not already exist.
        self.metadata.create_all(self.engine)

    def insert_log_entries(self, chat_handle: str, log_entries: List["LogEntry"]):
        """Bulk-insert entries for one chat via executemany-style insert."""
        query = sqlalchemy.insert(self.log_entries)
        values_list = [
            log_entry.to_row(chat_handle) for log_entry in log_entries
        ]
        self.conn.execute(query, values_list)

    def list_log_dates(self, chat_handle: str) -> List[datetime.date]:
        """Return the distinct calendar dates with entries, oldest first."""
        cols = [sqlalchemy.cast(self.log_entries.columns.datetime, sqlalchemy.Date)]
        if self.engine.url.drivername == "sqlite":
            # sqlite has no native DATE type; DATE() yields an ISO string,
            # parsed back into date objects below.
            cols = [sqlalchemy.func.DATE(self.log_entries.columns.datetime).label("datetime")]
        query = sqlalchemy.select(
            cols
        ).distinct(
        ).where(
            self.log_entries.columns.chat_handle == chat_handle
        ).order_by(
            sqlalchemy.asc(self.log_entries.columns.datetime)
        )
        result = self.conn.execute(query)
        rows = result.fetchall()
        if self.engine.url.drivername == "sqlite":
            return [
                datetime.date.fromisoformat(row.datetime) for row in rows
            ]
        return [
            row.datetime for row in rows
        ]

    def list_log_entries(self, chat_handle: str, log_date: Optional[datetime.date]):
        """Return a chat's entries (optionally for one day) in message order."""
        conditions = [
            self.log_entries.columns.chat_handle == chat_handle
        ]
        if log_date:
            # Restrict to the half-open range [midnight, next midnight).
            start_datetime = datetime.datetime.combine(log_date, datetime.time(0, 0, 0))
            end_datetime = start_datetime + datetime.timedelta(days=1)
            conditions.extend([
                self.log_entries.columns.datetime >= start_datetime,
                self.log_entries.columns.datetime < end_datetime
            ])
        query = sqlalchemy.select(
            self.log_entries.columns
        ).where(
            sqlalchemy.and_(*conditions)
        ).order_by(
            sqlalchemy.asc(self.log_entries.columns.message_id),
            sqlalchemy.asc(self.log_entries.columns.sub_message_id)
        )
        result = self.conn.execute(query)
        return [
            LogEntry.from_row(row)
            for row in result.fetchall()
        ]

    def list_user_ids(self, chat_handle: str) -> Set[int]:
        """Return the distinct user ids that appear in a chat's entries."""
        query = sqlalchemy.select(
            self.log_entries.columns.user_id
        ).distinct(
            self.log_entries.columns.user_id
        ).where(
            self.log_entries.columns.chat_handle == chat_handle
        )
        result = self.conn.execute(query)
        return set([row.user_id for row in result.fetchall()])

    def update_chat_log(self, chat_handle: str, last_message_id: Optional[int]):
        """Record the newest message id synced for an existing chat row."""
        query = sqlalchemy.update(
            self.chat_logs
        ).values(
            last_message_id=last_message_id
        ).where(
            self.chat_logs.columns.chat_handle == chat_handle
        )
        self.conn.execute(query)

    def create_chat_log(self, chat_handle: str) -> None:
        """Insert a fresh chat row with no messages synced yet."""
        query = sqlalchemy.insert(
            self.chat_logs
        ).values(
            chat_handle=chat_handle,
            last_message_id=None
        )
        self.conn.execute(query)

    def get_chat_log(self, chat_handle: str) -> "ChatLog":
        """Fetch a chat's log, lazily creating the row on first access."""
        query = sqlalchemy.select(
            self.chat_logs.columns.last_message_id
        ).where(
            self.chat_logs.columns.chat_handle == chat_handle
        )
        result = self.conn.execute(query)
        row = result.fetchone()
        if row is None:
            self.create_chat_log(chat_handle)
            return ChatLog(chat_handle, self)
        return ChatLog(chat_handle, self, last_message_id=row.last_message_id)
|
# Package version string (PEP 440 development release).
__version__ = '0.24.dev0'
|
"""
Created by @Jisan7509
plugin for Cat_Userbot
☝☝☝
You remove this, you gay.
"""
import os
from telethon.errors.rpcerrorlist import YouBlockedUserError
from ..core.managers import edit_delete, edit_or_reply
from ..helpers.functions import clippy
from . import _codtools, codex, convert_toimage, mention, reply_id
plugin_category = "extra"
@codex.cod_cmd(
    # Raw string: "\s" in a plain string is an invalid escape (SyntaxWarning
    # on modern Python); behavior is unchanged.
    pattern=r"iascii ?([\s\S]*)",
    command=("iascii", plugin_category),
    info={
        "header": "Convert media to ascii art.",
        "description": "Reply to any media files like pic, gif, sticker, video and it will convert into ascii.",
        "usage": [
            "{tr}iascii <reply to a media>",
        ],
    },
)
async def bad(event):
    "Make a media to ascii art"
    reply_message = await event.get_reply_message()
    if not event.reply_to_msg_id or not reply_message.media:
        return await edit_delete(event, "```Reply to a media file...```")
    c_id = await reply_id(event)
    if not os.path.isdir("./temp"):
        os.mkdir("./temp")
    output_file = os.path.join("./temp", "jisan.jpg")
    output = await _codtools.media_to_pic(event, reply_message)
    # convert_toimage writes the jpg to output_file as a side effect; the
    # return value was never used, so it is no longer bound.
    convert_toimage(output[1], filename="./temp/jisan.jpg")
    kakashi = await edit_or_reply(event, "```Wait making ASCII...```")
    async with event.client.conversation("@asciiart_bot") as conv:
        try:
            msg = await conv.send_file(output_file)
            response = await conv.get_response()
            await event.client.send_read_acknowledge(conv.chat_id)
        except YouBlockedUserError:
            return await kakashi.edit(
                "```Please unblock @asciiart_bot and try again```"
            )
        if response.text.startswith("Forward"):
            await kakashi.edit(
                "```can you kindly disable your forward privacy settings for good?```"
            )
        else:
            await kakashi.delete()
            await event.client.send_file(
                event.chat_id,
                response,
                reply_to=c_id,
                caption=f"**➥ Image Type :** ASCII Art\n**➥ Uploaded By :** {mention}",
            )
            await event.client.send_read_acknowledge(conv.chat_id)
            await event.client.delete_messages(conv.chat_id, [msg.id, response.id])
    if os.path.exists(output_file):
        os.remove(output_file)
@codex.cod_cmd(
    # Raw string avoids the invalid-escape SyntaxWarning for "\s".
    pattern=r"line ?([\s\S]*)",
    command=("line", plugin_category),
    info={
        "header": "Convert media to line image.",
        "description": "Reply to any media files like pic, gif, sticker, video and it will convert into line image.",
        "usage": [
            "{tr}line <reply to a media>",
        ],
    },
)
async def pussy(event):
    "Make a media to line image"
    reply_message = await event.get_reply_message()
    if not event.reply_to_msg_id or not reply_message.media:
        return await edit_delete(event, "```Reply to a media file...```")
    c_id = await reply_id(event)
    if not os.path.isdir("./temp"):
        os.mkdir("./temp")
    output_file = os.path.join("./temp", "jisan.jpg")
    output = await _codtools.media_to_pic(event, reply_message)
    # convert_toimage writes the jpg to output_file as a side effect.
    convert_toimage(output[1], filename="./temp/jisan.jpg")
    kakashi = await edit_or_reply(event, "```Processing....```")
    async with event.client.conversation("@Lines50Bot") as conv:
        try:
            msg = await conv.send_file(output_file)
            pic = await conv.get_response()
            await event.client.send_read_acknowledge(conv.chat_id)
        except YouBlockedUserError:
            return await kakashi.edit("```Please unblock @Lines50Bot and try again```")
        await kakashi.delete()
        await event.client.send_file(
            event.chat_id,
            pic,
            reply_to=c_id,
            caption=f"**➥ Image Type :** LINE Art \n**➥ Uploaded By :** {mention}",
        )
        await event.client.delete_messages(conv.chat_id, [msg.id, pic.id])
    if os.path.exists(output_file):
        os.remove(output_file)
@codex.cod_cmd(
    # Raw string avoids the invalid-escape SyntaxWarning for "\s".
    pattern=r"clip ?([\s\S]*)",
    command=("clip", plugin_category),
    info={
        "header": "Convert media to sticker by clippy",
        "description": "Reply to any media files like pic, gif, sticker, video and it will convert into sticker by clippy.",
        "usage": [
            "{tr}clip <reply to a media>",
        ],
    },
)
async def cod(event):
    "Make a media to clippy sticker"
    reply_message = await event.get_reply_message()
    if not event.reply_to_msg_id or not reply_message.media:
        return await edit_delete(event, "```Reply to a media file...```")
    # NOTE: local deliberately shadows the function name, as in the original.
    cod = await edit_or_reply(event, "```Processing...```")
    c_id = await reply_id(event)
    if not os.path.isdir("./temp"):
        os.mkdir("./temp")
    output_file = os.path.join("./temp", "jisan.jpg")
    output = await _codtools.media_to_pic(event, reply_message)
    # convert_toimage writes the jpg to output_file as a side effect.
    convert_toimage(output[1], filename="./temp/jisan.jpg")
    await cod.delete()
    await clippy(event.client, output_file, event.chat_id, c_id)
    if os.path.exists(output_file):
        os.remove(output_file)
|
import os
import random
import cv2
# Ten random RGB-triples (OpenCV uses BGR channel order) used as box colors,
# presumably one per object class.  Regenerated every run — TODO: seed the
# RNG if reproducible colors are wanted.
COLORS = [[random.randint(0, 255) for _ in range(3)] for _ in range(10)]
def draw_gt():
    """Draw NWPU VHR-10 ground-truth boxes on 448x448 images and save them.

    Reads each positive image and its annotation file, scales every box to a
    448x448 canvas, draws the box plus a filled class-label banner, and writes
    the result to the gt_image directory.
    """
    imgs = os.listdir('../dataset/NWPU VHR-10 dataset/positive image set')
    for img_name in imgs:
        print(img_name)
        img = cv2.imread('../dataset/NWPU VHR-10 dataset/positive image set/' + img_name)
        ori_width, ori_height = img.shape[1], img.shape[0]
        # Resize once, before the box loop: the original resized inside the
        # loop every iteration (redundant after the first pass) and skipped
        # resizing entirely for images without annotations, saving those at
        # the original resolution — now all outputs are consistently 448x448.
        img = cv2.resize(img, (448, 448))
        # read bbox annotations
        with open('../dataset/NWPU VHR-10 dataset/ground truth/' + img_name.split('.')[0] + '.txt', 'r') as f:
            lines = f.readlines()
        for line in lines:
            if line == '\n':
                break
            line = line.replace('(', '').replace(')', '').replace(' ', '').strip().split(',')
            x1, y1, x2, y2, c = int(line[0]), int(line[1]), int(line[2]), int(line[3]), int(line[4]) - 1
            # x1 y1 x2 y2 normalize to 0~1, then scale to the 448x448 canvas
            x1, y1, x2, y2 = x1 / ori_width, y1 / ori_height, x2 / ori_width, y2 / ori_height
            x1, y1, x2, y2 = int(x1 * 448), int(y1 * 448), int(x2 * 448), int(y2 * 448)
            # NOTE(review): c is already 0-based here, so COLORS[c - 1]
            # shifts the palette by one (class 0 wraps to COLORS[-1]).
            # Kept as-is to preserve output; confirm intended indexing.
            img = cv2.rectangle(img, (x1, y1), (x2, y2), COLORS[c - 1], 2)
            cv2.rectangle(img, (x1 - 1, y1), (x2 + 1, y1 - 22), COLORS[c - 1], -1, cv2.LINE_AA)
            cv2.putText(img, str(c), (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 1, color=(
                255, 0, 0), thickness=3)
        cv2.imwrite('../dataset/NWPU VHR-10 dataset/gt_image/' + img_name, img)

if __name__ == "__main__":
    draw_gt()
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import TestCase, main
import numpy.testing as npt
from qiita_core.util import qiita_test_checker
from qiita_core.qiita_settings import qiita_config
import qiita_db as qdb
@qiita_test_checker()
class TestPortal(TestCase):
    """Tests for portal creation/removal and study/analysis portal membership.

    Note: ``assertItemsEqual`` was removed in Python 3; its direct replacement
    is ``assertCountEqual`` (same semantics: equal elements regardless of
    order), used throughout below.
    """

    def setUp(self):
        self.portal = qiita_config.portal
        self.study = qdb.study.Study(1)
        self.analysis = qdb.analysis.Analysis(1)
        self.qiita_portal = qdb.portal.Portal('QIITA')
        self.emp_portal = qdb.portal.Portal('EMP')

    def tearDown(self):
        # restore whatever portal was configured before the test ran
        qiita_config.portal = self.portal

    def test_list_portals(self):
        obs = qdb.portal.Portal.list_portals()
        exp = ['EMP']
        self.assertEqual(obs, exp)

    def test_add_portal(self):
        obs = qdb.portal.Portal.create("NEWPORTAL", "SOMEDESC")
        obs = self.conn_handler.execute_fetchall(
            "SELECT * FROM qiita.portal_type")
        exp = [[1, 'QIITA', 'QIITA portal. Access to all data stored '
                'in database.'],
               [2, 'EMP', 'EMP portal'],
               [4, 'NEWPORTAL', 'SOMEDESC']]
        self.assertCountEqual(obs, exp)
        obs = self.conn_handler.execute_fetchall(
            "SELECT * FROM qiita.analysis_portal")
        exp = [[1, 1], [2, 1], [3, 1], [4, 1], [5, 1], [6, 1], [7, 2], [8, 2],
               [9, 2], [10, 2], [11, 4], [12, 4], [13, 4], [14, 4]]
        self.assertCountEqual(obs, exp)
        with self.assertRaises(qdb.exceptions.QiitaDBDuplicateError):
            qdb.portal.Portal.create("EMP", "DOESNTMATTERFORDESC")
        qdb.portal.Portal.delete('NEWPORTAL')

    def test_remove_portal(self):
        qdb.portal.Portal.create("NEWPORTAL", "SOMEDESC")
        # Select some samples on a default analysis
        qiita_config.portal = "NEWPORTAL"
        a = qdb.user.User("test@foo.bar").default_analysis
        a.add_samples({1: ['1.SKB8.640193', '1.SKD5.640186']})
        qdb.portal.Portal.delete("NEWPORTAL")
        obs = self.conn_handler.execute_fetchall(
            "SELECT * FROM qiita.portal_type")
        exp = [[1, 'QIITA', 'QIITA portal. Access to all data stored '
                'in database.'],
               [2, 'EMP', 'EMP portal']]
        self.assertCountEqual(obs, exp)
        obs = self.conn_handler.execute_fetchall(
            "SELECT * FROM qiita.analysis_portal")
        exp = [[1, 1], [2, 1], [3, 1], [4, 1], [5, 1], [6, 1], [7, 2], [8, 2],
               [9, 2], [10, 2]]
        self.assertCountEqual(obs, exp)
        with self.assertRaises(qdb.exceptions.QiitaDBLookupError):
            qdb.portal.Portal.delete("NOEXISTPORTAL")
        with self.assertRaises(qdb.exceptions.QiitaDBError):
            qdb.portal.Portal.delete("QIITA")
        qdb.portal.Portal.create("NEWPORTAL2", "SOMEDESC")
        # Add study to this new portal and make sure error raised
        info = {
            "timeseries_type_id": 1,
            "metadata_complete": True,
            "mixs_compliant": True,
            "number_samples_collected": 25,
            "number_samples_promised": 28,
            "study_alias": "FCM",
            "study_description": "Microbiome of people who eat nothing but "
                                 "fried chicken",
            "study_abstract": "Exploring how a high fat diet changes the "
                              "gut microbiome",
            "emp_person_id": qdb.study.StudyPerson(2),
            "principal_investigator_id": qdb.study.StudyPerson(3),
            "lab_person_id": qdb.study.StudyPerson(1)
        }
        qdb.portal.Portal.create("NEWPORTAL3", "SOMEDESC")
        qiita_config.portal = "NEWPORTAL3"
        qdb.study.Study.create(
            qdb.user.User('test@foo.bar'), "Fried chicken microbiome", info)
        qiita_config.portal = "QIITA"
        with self.assertRaises(qdb.exceptions.QiitaDBError):
            qdb.portal.Portal.delete("NEWPORTAL3")

    def test_check_studies(self):
        with self.assertRaises(qdb.exceptions.QiitaDBError):
            self.qiita_portal._check_studies([2000000000000, 122222222222222])

    def test_check_analyses(self):
        with self.assertRaises(qdb.exceptions.QiitaDBError):
            self.qiita_portal._check_analyses([2000000000000, 122222222222222])
        with self.assertRaises(qdb.exceptions.QiitaDBError):
            self.qiita_portal._check_analyses([8, 9])

    def test_get_studies_by_portal(self):
        obs = self.emp_portal.get_studies()
        self.assertEqual(obs, set())
        obs = self.qiita_portal.get_studies()
        self.assertEqual(obs, {qdb.study.Study(1)})

    def test_add_study_portals(self):
        obs = qdb.portal.Portal.create("NEWPORTAL4", "SOMEDESC")
        obs.add_studies([self.study.id])
        self.assertCountEqual(self.study._portals, ['NEWPORTAL4', 'QIITA'])
        npt.assert_warns(qdb.exceptions.QiitaDBWarning, obs.add_studies,
                         [self.study.id])
        obs.remove_studies([self.study.id])
        qdb.portal.Portal.delete("NEWPORTAL4")

    def test_remove_study_portals(self):
        with self.assertRaises(ValueError):
            self.qiita_portal.remove_studies([self.study.id])
        self.emp_portal.add_studies([1])
        # Set up the analysis in EMP portal
        self.emp_portal.add_analyses([self.analysis.id])
        obs = self.analysis._portals
        self.assertCountEqual(obs, ['QIITA', 'EMP'])
        # Test study removal failure
        with self.assertRaises(qdb.exceptions.QiitaDBError):
            self.emp_portal.remove_studies([self.study.id])
        obs = self.study._portals
        self.assertCountEqual(obs, ['QIITA', 'EMP'])
        # Test study removal
        self.emp_portal.remove_analyses([self.analysis.id])
        self.emp_portal.remove_studies([self.study.id])
        obs = self.study._portals
        self.assertEqual(obs, ['QIITA'])
        obs = npt.assert_warns(
            qdb.exceptions.QiitaDBWarning, self.emp_portal.remove_studies,
            [self.study.id])

    def test_get_analyses_by_portal(self):
        qiita_config.portal = 'EMP'
        exp = {qdb.analysis.Analysis(7), qdb.analysis.Analysis(8),
               qdb.analysis.Analysis(9), qdb.analysis.Analysis(10)}
        obs = self.emp_portal.get_analyses()
        self.assertEqual(obs, exp)
        qiita_config.portal = 'QIITA'
        exp = {qdb.analysis.Analysis(1), qdb.analysis.Analysis(2),
               qdb.analysis.Analysis(3), qdb.analysis.Analysis(4),
               qdb.analysis.Analysis(5), qdb.analysis.Analysis(6)}
        obs = self.qiita_portal.get_analyses()
        self.assertEqual(obs, exp)

    def test_add_analysis_portals(self):
        obs = self.analysis._portals
        self.assertEqual(obs, ['QIITA'])
        with self.assertRaises(qdb.exceptions.QiitaDBError):
            self.emp_portal.add_analyses([self.analysis.id])
        obs = self.analysis._portals
        self.assertEqual(obs, ['QIITA'])
        self.emp_portal.add_studies([1])
        self.emp_portal.add_analyses([self.analysis.id])
        obs = self.analysis._portals
        self.assertEqual(obs, ['EMP', 'QIITA'])
        npt.assert_warns(
            qdb.exceptions.QiitaDBWarning, self.emp_portal.add_analyses,
            [self.analysis.id])
        self.emp_portal.remove_analyses([self.analysis.id])
        self.emp_portal.remove_studies([1])

    def test_remove_analysis_portals(self):
        with self.assertRaises(ValueError):
            self.qiita_portal.remove_analyses([self.analysis.id])
        # set up the analysis in EMP portal
        self.emp_portal.add_studies([1])
        self.emp_portal.add_analyses([self.analysis.id])
        obs = self.analysis._portals
        self.assertCountEqual(obs, ['QIITA', 'EMP'])
        # Test removal
        self.emp_portal.remove_analyses([self.analysis.id])
        obs = self.analysis._portals
        self.assertEqual(obs, ['QIITA'])
        obs = npt.assert_warns(
            qdb.exceptions.QiitaDBWarning, self.emp_portal.remove_analyses,
            [self.analysis.id])
        self.emp_portal.remove_studies([1])

if __name__ == '__main__':
    main()
|
from re import *
from ply.lex import lex
'''
DSVR -> SRs
SRs -> SRs SR
| SR
SR -> RMDEF ID SUBS '.'
t[0] = f'''
def {p[2]}(t):
texto = "_" + texto
while(True):
texto,num1 = subn('_the','o_',texto)
texto,num2 = subn('_cat','gato_',texto)
texto,num3 = subn('_is sleeping','esta a dormir_',texto)
#Caso nenhuma substituição tenha acontecido
if (num1 + num2 + num3 == 0):
texto,num4 = subn('_(.)',r'\1_',texto)
#Caso a nossa marca se encontre no final do texto
if search(r'_$',texto):
return texto'''
SUBS -> SUBS SUB
| SUB
SUB -> TEXTO '->' TEXTO
SUB -> SUBT
: ls,rs = p[1]
: t[0] = f'\tt,n1 = subn(rf"{ls}",rf"{rs}",t); n+=n1\n'
''' |
# Constants
# Player position lookup, keyed by the feed's numeric position code.
position_dict = {
    1: 'Center',
    2: 'Left Wing',
    3: 'Right Wing',
    4: 'Defense',
    5: 'Goalie',
}
# NHL team lookup, keyed by the feed's numeric franchise id.
# Ids 31-36 are absent from the source data; Vegas is 37.
team_dict = {
    1: 'Boston Bruins',
    2: 'Buffalo Sabres',
    3: 'Calgary Flames',
    4: 'Chicago Blackhawks',
    5: 'Detroit Red Wings',
    6: 'Edmonton Oilers',
    7: 'Carolina Hurricanes',
    8: 'Los Angeles Kings',
    9: 'Dallas Stars',
    10: 'Montréal Canadiens',
    11: 'New Jersey Devils',
    12: 'New York Islanders',
    13: 'New York Rangers',
    14: 'Ottawa Senators',
    15: 'Philadelphia Flyers',
    16: 'Pittsburgh Penguins',
    17: 'Colorado Avalanche',
    18: 'San Jose Sharks',
    19: 'St. Louis Blues',
    20: 'Tampa Bay Lightning',
    21: 'Toronto Maple Leafs',
    22: 'Vancouver Canucks',
    23: 'Washington Capitals',
    24: 'Arizona Coyotes',
    25: 'Anaheim Ducks',
    26: 'Florida Panthers',
    27: 'Nashville Predators',
    28: 'Winnipeg Jets',
    29: 'Columbus Blue Jackets',
    30: 'Minnesota Wild',
    37: 'Vegas Golden Knights',
}
|
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Extractor for datacite xml records, currently for CRCNS datasets
"""
import re
import os.path as op
from collections import OrderedDict
import logging
lgr = logging.getLogger('datalad.metadata.extractors.datacite')
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
from datalad.metadata.extractors.base import BaseMetadataExtractor
def _merge(iterable):
"""Merge multiple items into a single one separating with a newline"""
return "\n".join(iterable)
def _unwrap(text):
"""Basic unwrapping of text separated by newlines"""
return re.sub(r'\n\s*', ' ', text)
def _process_tree(tree, nstag):
    """Map a datacite XML tree onto an OrderedDict of our standard fields.

    ``nstag`` turns a bare tag name into its namespaced XPath form.  Each
    spec row gives: output key, datacite tag, whether to collect all matches,
    a per-item transform, and a transform applied to the collected list.
    """
    spec = [
        ('author', 'creatorName', True, None, None),
        ('name', "title[@titleType='AlternativeTitle']", False, None, None),
        # actually it seems we have no title but "ShortDescription"!!! TODO
        #('title', "title", False, _unwrap, None),
        ('shortdescription', "title", False, _unwrap, None),
        ('description', 'description', True, _unwrap, _merge),
        ('version', 'version', False, None, None),
        ('sameas', "identifier[@identifierType='DOI']", False, None, None),
        # conflicts with our notion for having a "type" to be internal and to demarkate a Dataset
        # here might include the field e.g. Dataset/Neurophysiology, so skipping for now
        # ('type', "resourceType[@resourceTypeGeneral='Dataset']", False, None, None),
        ('citation', "relatedIdentifier", True, None, None),
        ('tag', "subject", True, None, None),
        ('formats', "format", True, None, None),
    ]
    rec = OrderedDict()
    for key, raw_tag, getall, per_item_, combine in spec:
        per_item = per_item_ or (lambda x: x)
        tag = nstag(raw_tag)
        try:
            if getall:
                value = [per_item(node.text.strip()) for node in tree.findall(tag)]
            else:
                value = per_item(tree.find(tag).text.strip())
        except AttributeError:
            # element absent (find() gave None) or it carried no text
            continue
        if not value or value == ['']:
            continue
        if combine:
            value = combine(value)
        rec[key] = value
    return rec
class MetadataExtractor(BaseMetadataExtractor):
    """Extract datacite metadata from a dataset's ``meta.datacite.xml``."""

    def _get_dataset_metadata(self):
        """Return the parsed metadata dict, or {} when no datacite file exists."""
        canonical = op.join(self.ds.path, '.datalad', 'meta.datacite.xml')
        # look for the first matching filename and go with it
        fname = [canonical] if op.lexists(canonical) else \
            [op.join(self.ds.path, f) for f in self.paths
             if op.basename(f) == 'meta.datacite.xml']
        if not fname or not op.lexists(fname[0]):
            return {}
        fname = fname[0]
        # those namespaces are a b.ch
        # TODO: avoid reading file twice
        # Fix: the file handle passed to iterparse was previously never
        # closed; use a context manager so it is released deterministically.
        with open(fname) as xml_fp:
            namespaces = dict([
                node for _, node in ET.iterparse(
                    xml_fp, events=('start-ns',)
                )
            ])
        ns = namespaces['']

        def nstag(tag):
            return './/{%s}%s' % (ns, tag)

        tree = ET.ElementTree(file=fname)
        return _process_tree(tree, nstag)

    def _get_content_metadata(self):
        return []  # no content metadata provided
|
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
'''
==================================
= mixingcycles_Isoclines.py
==================================
Computes isoclines of the dynamics of average inoculum sizes over multiple cycles
Lukas Geyrhofer, l.geyrhofer@technion.ac.il, 2018
'''
import numpy as np
import argparse
import sys,math
import pickle
from skimage import measure
import growthclasses as gc
def write_contours_to_file(contours, filename, axis1, axis2):
    """Write contour coordinates, mapped onto axis values, to a text file.

    Contour points (as produced by ``skimage.measure.find_contours``) are
    fractional grid indices; each is linearly interpolated between the
    corresponding entries of ``axis1``/``axis2``.  Contours are separated by
    a blank line (gnuplot-style blocks).
    """
    # 'with' guarantees the file is closed even if a write fails
    with open(filename, "w") as fp:
        for contour in contours:
            for point in contour:
                ix = int(np.floor(point[0]))
                iy = int(np.floor(point[1]))
                px = point[0] - ix
                py = point[1] - iy
                # Interpolate between adjacent axis values; at the upper
                # boundary (ix+1 out of range) fall back to the last value.
                # Bare 'except:' narrowed to IndexError so real errors surface.
                try:
                    cx = (1. - px) * axis1[ix] + px * axis1[ix + 1]
                except IndexError:
                    cx = axis1[ix]
                try:
                    cy = (1. - py) * axis2[iy] + py * axis2[iy + 1]
                except IndexError:
                    cy = axis2[iy]
                fp.write('{:14.6e} {:14.6e}\n'.format(cx, cy))
            fp.write('\n')
def main():
    """Compute isoclines of average-inoculum-size dynamics over cycles.

    Loads a pickled growth model, averages growth over a Poisson-seeded
    inoculum lattice, and writes zero-contours (nullclines) for the strain
    fraction and for total/per-strain population size to text files.
    """
    parser = argparse.ArgumentParser()
    parser_io = parser.add_argument_group(description = "==== I/O parameters ====")
    parser_io.add_argument("-i","--infile",required=True)
    parser_io.add_argument("-o","--baseoutfilename",default="out")
    parser_io.add_argument("-v","--verbose",action="store_true",default=False)
    parser_io.add_argument("-S","--OutputSinglestrainNullclines",action="store_true",default=False)
    # lattice/dilution options are defined by the growthclasses helpers
    parser = gc.AddLatticeParameters(parser)
    parser = gc.AddDilutionParameters(parser)
    args=parser.parse_args()
    # growth model and the list of dilution factors to scan
    g = gc.LoadGM(**vars(args))
    dlist = gc.getDilutionList(**vars(args))
    # get new axes, which depends on parameters above (in lattice parameter group)
    axis1,axis2 = gc.getInoculumAxes(**vars(args)) # either (n,x) or [ (n1,n2) if args.AbsoluteCoordinates == True ]
    shape = (len(axis1),len(axis2))
    # loaded from pickle file: growth matrix grid and per-strain growth matrices
    m1,m2 = g.growthmatrixgrid
    gm1 = g.growthmatrix[:,:,0]
    gm2 = g.growthmatrix[:,:,1]
    # matrices to store averages
    g1 = np.zeros(shape,dtype=np.float64) # avg'd growth strain 1
    g2 = np.zeros(shape,dtype=np.float64) # avg'd growth strain 2
    rr1 = np.zeros(shape,dtype=np.float64) # avg'd ratio of strains at end
    r1 = np.zeros(shape,dtype=np.float64) # avg'd ratio of strains at beginning
    sn1 = np.zeros(shape,dtype=np.float64) # number of cells of strain 1 in new matrix shape
    sn2 = np.zeros(shape,dtype=np.float64) # number of cells of strain 2 in new matrix shape
    # get all averages and store them in the appropriate matrices
    for i,a1 in enumerate(axis1):
        for j,a2 in enumerate(axis2):
            sn1[i,j],sn2[i,j] = gc.TransformInoculum([a1,a2],inabs = args.AbsoluteCoordinates, outabs = True)
            g1[i,j] = gc.SeedingAverage(gm1, [sn1[i,j],sn2[i,j]])
            g2[i,j] = gc.SeedingAverage(gm2, [sn1[i,j],sn2[i,j]])
    # fractions of strain 1 after growth (rr1) and at seeding (r1);
    # guarded against division by zero by masking empty inocula
    rr1[g1+g2>0] = (g1[g1+g2>0])/((g1+g2)[g1+g2>0])
    r1[sn1+sn2>0] = (sn1[sn1+sn2>0])/((sn1+sn2)[sn1+sn2>0])
    # output
    if args.verbose:
        sys.stdout.write('\n computing nullcline for fraction of strains\n')
    # zero-contour of (fraction after growth - fraction at seeding)
    cont_xx = measure.find_contours(rr1 - r1,0)
    write_contours_to_file(cont_xx,args.baseoutfilename + '_X',axis1,axis2)
    for dilution in dlist:
        if args.verbose:
            sys.stdout.write('  computing nullclines for dilution D = {:.4e}\n'.format(dilution))
        # total-population fixed points: diluted growth equals the inoculum
        cont_nn = measure.find_contours((g1 + g2) * dilution - sn1 - sn2,0)
        write_contours_to_file(cont_nn,args.baseoutfilename + '_N_D{:.3e}'.format(dilution),axis1,axis2)
        if args.OutputSinglestrainNullclines:
            cont_n1 = measure.find_contours(g1 * dilution - sn1,0)
            cont_n2 = measure.find_contours(g2 * dilution - sn2,0)
            write_contours_to_file(cont_n1,args.baseoutfilename + '_1_D{:.3e}'.format(dilution),axis1,axis2)
            write_contours_to_file(cont_n2,args.baseoutfilename + '_2_D{:.3e}'.format(dilution),axis1,axis2)

if __name__ == "__main__":
    main()
|
from django.contrib import admin
from .models import Paste
# Register your models here.
# Expose Paste in the Django admin using the default ModelAdmin options.
admin.site.register(Paste)
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/NutritionOrder
Release: STU3
Version: 3.0.2
Revision: 11917
Last updated: 2019-10-24T11:53:00+11:00
"""
from typing import Any, Dict
from typing import List as ListType
from pydantic import Field, root_validator
from . import backboneelement, domainresource, fhirtypes
class NutritionOrder(domainresource.DomainResource):
    """Diet, formula or nutritional supplement request.

    A request to supply a diet, formula feeding (enteral) or oral nutritional
    supplement to a patient/resident.

    Disclaimer: any field name that ends with ``__ext`` is not part of the
    resource's StructureDefinition; it exists to enable the Extensibility
    feature for FHIR primitive data types.
    """

    resource_type = Field("NutritionOrder", const=True)

    allergyIntolerance: ListType[fhirtypes.ReferenceType] = Field(
        None,
        alias="allergyIntolerance",
        title=(
            "List of the patient's food and nutrition-related allergies and "
            "intolerances"
        ),
        description=(
            "A link to a record of allergies or intolerances which should be "
            "included in the nutrition order."
        ),
        # if property is element of this resource.
        element_property=True,
        # note: Listed Resource Type(s) should be allowed as Reference.
        enum_reference_types=["AllergyIntolerance"],
    )

    dateTime: fhirtypes.DateTime = Field(
        ...,
        alias="dateTime",
        title="Date and time the nutrition order was requested",
        description="The date and time that this nutrition order was requested.",
        # if property is element of this resource.
        element_property=True,
    )
    dateTime__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_dateTime", title="Extension field for ``dateTime``."
    )

    encounter: fhirtypes.ReferenceType = Field(
        None,
        alias="encounter",
        title="The encounter associated with this nutrition order",
        description=(
            "An encounter that provides additional information about the healthcare"
            " context in which this request is made."
        ),
        # if property is element of this resource.
        element_property=True,
        # note: Listed Resource Type(s) should be allowed as Reference.
        enum_reference_types=["Encounter"],
    )

    enteralFormula: fhirtypes.NutritionOrderEnteralFormulaType = Field(
        None,
        alias="enteralFormula",
        title="Enteral formula components",
        description=(
            "Feeding provided through the gastrointestinal tract via a tube, "
            "catheter, or stoma that delivers nutrition distal to the oral cavity."
        ),
        # if property is element of this resource.
        element_property=True,
    )

    excludeFoodModifier: ListType[fhirtypes.CodeableConceptType] = Field(
        None,
        alias="excludeFoodModifier",
        title=(
            "Order-specific modifier about the type of food that should not be " "given"
        ),
        description=(
            "This modifier is used to convey order-specific modifiers about the "
            "type of food that should NOT be given. These can be derived from "
            "patient allergies, intolerances, or preferences such as No Red Meat, "
            "No Soy or No Wheat or Gluten-Free. While it should not be necessary "
            "to repeat allergy or intolerance information captured in the "
            "referenced AllergyIntolerance resource in the excludeFoodModifier, "
            "this element may be used to convey additional specificity related to "
            "foods that should be eliminated from the patient\u2019s diet for any "
            "reason. This modifier applies to the entire nutrition order inclusive"
            " of the oral diet, nutritional supplements and enteral formula "
            "feedings."
        ),
        # if property is element of this resource.
        element_property=True,
    )

    foodPreferenceModifier: ListType[fhirtypes.CodeableConceptType] = Field(
        None,
        alias="foodPreferenceModifier",
        title="Order-specific modifier about the type of food that should be given",
        description=(
            "This modifier is used to convey order-specific modifiers about the "
            "type of food that should be given. These can be derived from patient "
            "allergies, intolerances, or preferences such as Halal, Vegan or "
            "Kosher. This modifier applies to the entire nutrition order inclusive "
            "of the oral diet, nutritional supplements and enteral formula "
            "feedings."
        ),
        # if property is element of this resource.
        element_property=True,
    )

    identifier: ListType[fhirtypes.IdentifierType] = Field(
        None,
        alias="identifier",
        title="Identifiers assigned to this order",
        description=(
            "Identifiers assigned to this order by the order sender or by the order"
            " receiver."
        ),
        # if property is element of this resource.
        element_property=True,
    )

    oralDiet: fhirtypes.NutritionOrderOralDietType = Field(
        None,
        alias="oralDiet",
        title="Oral diet components",
        description="Diet given orally in contrast to enteral (tube) feeding.",
        # if property is element of this resource.
        element_property=True,
    )

    orderer: fhirtypes.ReferenceType = Field(
        None,
        alias="orderer",
        title="Who ordered the diet, formula or nutritional supplement",
        description=(
            "The practitioner that holds legal responsibility for ordering the "
            "diet, nutritional supplement, or formula feedings."
        ),
        # if property is element of this resource.
        element_property=True,
        # note: Listed Resource Type(s) should be allowed as Reference.
        enum_reference_types=["Practitioner"],
    )

    patient: fhirtypes.ReferenceType = Field(
        ...,
        alias="patient",
        title="The person who requires the diet, formula or nutritional supplement",
        description=(
            "The person (patient) who needs the nutrition order for an oral diet, "
            "nutritional supplement and/or enteral or formula feeding."
        ),
        # if property is element of this resource.
        element_property=True,
        # note: Listed Resource Type(s) should be allowed as Reference.
        enum_reference_types=["Patient"],
    )

    status: fhirtypes.Code = Field(
        None,
        alias="status",
        title=(
            "proposed | draft | planned | requested | active | on-hold | completed "
            "| cancelled | entered-in-error"
        ),
        description="The workflow status of the nutrition order/request.",
        # if property is element of this resource.
        element_property=True,
        # note: Enum values can be used in validation,
        # but use in your own responsibilities, read official FHIR documentation.
        enum_values=[
            "proposed",
            "draft",
            "planned",
            "requested",
            "active",
            "on-hold",
            "completed",
            "cancelled",
            "entered-in-error",
        ],
    )
    status__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_status", title="Extension field for ``status``."
    )

    supplement: ListType[fhirtypes.NutritionOrderSupplementType] = Field(
        None,
        alias="supplement",
        title="Supplement components",
        description=(
            "Oral nutritional products given in order to add further nutritional "
            "value to the patient's diet."
        ),
        # if property is element of this resource.
        element_property=True,
    )
class NutritionOrderEnteralFormula(backboneelement.BackboneElement):
    """Enteral formula components.

    Feeding provided through the gastrointestinal tract via a tube, catheter,
    or stoma that delivers nutrition distal to the oral cavity.

    Disclaimer: any field name that ends with ``__ext`` is not part of the
    resource's StructureDefinition; it exists to enable the Extensibility
    feature for FHIR primitive data types.
    """

    resource_type = Field("NutritionOrderEnteralFormula", const=True)

    additiveProductName: fhirtypes.String = Field(
        None,
        alias="additiveProductName",
        title="Product or brand name of the modular additive",
        description=(
            "The product or brand name of the type of modular component to be added"
            " to the formula."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    additiveProductName__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None,
        alias="_additiveProductName",
        title="Extension field for ``additiveProductName``.",
    )

    additiveType: fhirtypes.CodeableConceptType = Field(
        None,
        alias="additiveType",
        title="Type of modular component to add to the feeding",
        description=(
            "Indicates the type of modular component such as protein, carbohydrate,"
            " fat or fiber to be provided in addition to or mixed with the base "
            "formula."
        ),
        # if property is element of this resource.
        element_property=True,
    )

    administration: ListType[
        fhirtypes.NutritionOrderEnteralFormulaAdministrationType
    ] = Field(
        None,
        alias="administration",
        title="Formula feeding instruction as structured data",
        description=(
            "Formula administration instructions as structured data. This "
            "repeating structure allows for changing the administration rate or "
            "volume over time for both bolus and continuous feeding. An example of"
            " this would be an instruction to increase the rate of continuous "
            "feeding every 2 hours."
        ),
        # if property is element of this resource.
        element_property=True,
    )

    administrationInstruction: fhirtypes.String = Field(
        None,
        alias="administrationInstruction",
        title="Formula feeding instructions expressed as text",
        description=(
            "Free text formula administration, feeding instructions or additional "
            "instructions or information."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    administrationInstruction__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None,
        alias="_administrationInstruction",
        title="Extension field for ``administrationInstruction``.",
    )

    baseFormulaProductName: fhirtypes.String = Field(
        None,
        alias="baseFormulaProductName",
        title="Product or brand name of the enteral or infant formula",
        description=(
            "The product or brand name of the enteral or infant formula product "
            'such as "ACME Adult Standard Formula".'
        ),
        # if property is element of this resource.
        element_property=True,
    )
    baseFormulaProductName__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None,
        alias="_baseFormulaProductName",
        title="Extension field for ``baseFormulaProductName``.",
    )

    baseFormulaType: fhirtypes.CodeableConceptType = Field(
        None,
        alias="baseFormulaType",
        title="Type of enteral or infant formula",
        description=(
            "The type of enteral or infant formula such as an adult standard "
            "formula with fiber or a soy-based infant formula."
        ),
        # if property is element of this resource.
        element_property=True,
    )

    caloricDensity: fhirtypes.QuantityType = Field(
        None,
        alias="caloricDensity",
        title="Amount of energy per specified volume that is required",
        description=(
            "The amount of energy (calories) that the formula should provide per "
            "specified volume, typically per mL or fluid oz. For example, an "
            "infant may require a formula that provides 24 calories per fluid ounce"
            " or an adult may require an enteral formula that provides 1.5 "
            "calorie/mL."
        ),
        # if property is element of this resource.
        element_property=True,
    )

    maxVolumeToDeliver: fhirtypes.QuantityType = Field(
        None,
        alias="maxVolumeToDeliver",
        title="Upper limit on formula volume per unit of time",
        description=(
            "The maximum total quantity of formula that may be administered to a "
            "subject over the period of time, e.g. 1440 mL over 24 hours."
        ),
        # if property is element of this resource.
        element_property=True,
    )

    routeofAdministration: fhirtypes.CodeableConceptType = Field(
        None,
        alias="routeofAdministration",
        title="How the formula should enter the patient's gastrointestinal tract",
        description=(
            "The route or physiological path of administration into the patient's "
            "gastrointestinal tract for purposes of providing the formula feeding,"
            " e.g. nasogastric tube."
        ),
        # if property is element of this resource.
        element_property=True,
    )
class NutritionOrderEnteralFormulaAdministration(backboneelement.BackboneElement):
    """Formula feeding instruction as structured data.

    Formula administration instructions as structured data. This repeating
    structure allows for changing the administration rate or volume over time
    for both bolus and continuous feeding. An example of this would be an
    instruction to increase the rate of continuous feeding every 2 hours.

    Disclaimer: any field name that ends with ``__ext`` doesn't form part of
    the Resource StructureDefinition; it is only used to enable the
    Extensibility feature for FHIR Primitive Data Types.
    """
    # Pydantic-level discriminator identifying this backbone element type.
    resource_type = Field("NutritionOrderEnteralFormulaAdministration", const=True)
    quantity: fhirtypes.QuantityType = Field(
        None,
        alias="quantity",
        title="The volume of formula to provide",
        description=(
            "The volume of formula to provide to the patient per the specified "
            "administration schedule."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    rateQuantity: fhirtypes.QuantityType = Field(
        None,
        alias="rateQuantity",
        title="Speed with which the formula is provided per period of time",
        description=(
            "The rate of administration of formula via a feeding pump, e.g. 60 mL "
            "per hour, according to the specified schedule."
        ),
        # if property is element of this resource.
        element_property=True,
        # Choice of Data Types. i.e rate[x]
        one_of_many="rate",
        one_of_many_required=False,
    )
    rateRatio: fhirtypes.RatioType = Field(
        None,
        alias="rateRatio",
        title="Speed with which the formula is provided per period of time",
        description=(
            "The rate of administration of formula via a feeding pump, e.g. 60 mL "
            "per hour, according to the specified schedule."
        ),
        # if property is element of this resource.
        element_property=True,
        # Choice of Data Types. i.e rate[x]
        one_of_many="rate",
        one_of_many_required=False,
    )
    schedule: fhirtypes.TimingType = Field(
        None,
        alias="schedule",
        title="Scheduled frequency of enteral feeding",
        description=(
            "The time period and frequency at which the enteral formula should be "
            "delivered to the patient."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    @root_validator(pre=True)
    def validate_one_of_many(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """https://www.hl7.org/fhir/formats.html#choice
        A few elements have a choice of more than one data type for their content.
        All such elements have a name that takes the form nnn[x].
        The "nnn" part of the name is constant, and the "[x]" is replaced with
        the title-cased name of the type that is actually used.
        The table view shows each of these names explicitly.
        Elements that have a choice of data type cannot repeat - they must have a
        maximum cardinality of 1. When constructing an instance of an element with a
        choice of types, the authoring system must create a single element with a
        data type chosen from among the list of permitted data types.
        """
        # rate[x]: at most one of rateQuantity / rateRatio may be populated.
        one_of_many_fields = {"rate": ["rateQuantity", "rateRatio"]}
        for prefix, fields in one_of_many_fields.items():
            # Sanity check: the generated field metadata must agree with this map.
            assert cls.__fields__[fields[0]].field_info.extra["one_of_many"] == prefix
            required = (
                cls.__fields__[fields[0]].field_info.extra["one_of_many_required"]
                is True
            )
            found = False
            for field in fields:
                if field in values and values[field] is not None:
                    if found is True:
                        raise ValueError(
                            "Any of one field value is expected from "
                            f"this list {fields}, but got multiple!"
                        )
                    else:
                        found = True
            if required is True and found is False:
                raise ValueError(f"Expect any of field value from this list {fields}.")
        return values
class NutritionOrderOralDiet(backboneelement.BackboneElement):
    """Oral diet components.

    Diet given orally in contrast to enteral (tube) feeding.

    Disclaimer: any field name that ends with ``__ext`` doesn't form part of
    the Resource StructureDefinition; it is only used to enable the
    Extensibility feature for FHIR Primitive Data Types.
    """
    # Pydantic-level discriminator identifying this backbone element type.
    resource_type = Field("NutritionOrderOralDiet", const=True)
    fluidConsistencyType: ListType[fhirtypes.CodeableConceptType] = Field(
        None,
        alias="fluidConsistencyType",
        title="The required consistency of fluids and liquids provided to the patient",
        description=(
            "The required consistency (e.g. honey-thick, nectar-thick, thin, "
            "thickened.) of liquids or fluids served to the patient."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    instruction: fhirtypes.String = Field(
        None,
        alias="instruction",
        title="Instructions or additional information about the oral diet",
        description=(
            "Free text or additional instructions or information pertaining to the "
            "oral diet."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    # Companion primitive-extension field for ``instruction``.
    instruction__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_instruction", title="Extension field for ``instruction``."
    )
    nutrient: ListType[fhirtypes.NutritionOrderOralDietNutrientType] = Field(
        None,
        alias="nutrient",
        title="Required nutrient modifications",
        description=(
            "Class that defines the quantity and type of nutrient modifications "
            "(for example carbohydrate, fiber or sodium) required for the oral "
            "diet."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    schedule: ListType[fhirtypes.TimingType] = Field(
        None,
        alias="schedule",
        title="Scheduled frequency of diet",
        description=(
            "The time period and frequency at which the diet should be given. The "
            "diet should be given for the combination of all schedules if more than"
            " one schedule is present."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    texture: ListType[fhirtypes.NutritionOrderOralDietTextureType] = Field(
        None,
        alias="texture",
        title="Required texture modifications",
        description=(
            "Class that describes any texture modifications required for the "
            "patient to safely consume various types of solid foods."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    type: ListType[fhirtypes.CodeableConceptType] = Field(
        None,
        alias="type",
        title=(
            "Type of oral diet or diet restrictions that describe what can be "
            "consumed orally"
        ),
        description=(
            "The kind of diet or dietary restriction such as fiber restricted diet "
            "or diabetic diet."
        ),
        # if property is element of this resource.
        element_property=True,
    )
class NutritionOrderOralDietNutrient(backboneelement.BackboneElement):
    """Required nutrient modifications.

    Class that defines the quantity and type of nutrient modifications (for
    example carbohydrate, fiber or sodium) required for the oral diet.

    Disclaimer: any field name that ends with ``__ext`` doesn't form part of
    the Resource StructureDefinition; it is only used to enable the
    Extensibility feature for FHIR Primitive Data Types.
    """
    # Pydantic-level discriminator identifying this backbone element type.
    resource_type = Field("NutritionOrderOralDietNutrient", const=True)
    amount: fhirtypes.QuantityType = Field(
        None,
        alias="amount",
        title="Quantity of the specified nutrient",
        description="The quantity of the specified nutrient to include in diet.",
        # if property is element of this resource.
        element_property=True,
    )
    modifier: fhirtypes.CodeableConceptType = Field(
        None,
        alias="modifier",
        title="Type of nutrient that is being modified",
        description="The nutrient that is being modified such as carbohydrate or sodium.",
        # if property is element of this resource.
        element_property=True,
    )
class NutritionOrderOralDietTexture(backboneelement.BackboneElement):
    """Required texture modifications.

    Class that describes any texture modifications required for the patient to
    safely consume various types of solid foods.

    Disclaimer: any field name that ends with ``__ext`` doesn't form part of
    the Resource StructureDefinition; it is only used to enable the
    Extensibility feature for FHIR Primitive Data Types.
    """
    # Pydantic-level discriminator identifying this backbone element type.
    resource_type = Field("NutritionOrderOralDietTexture", const=True)
    foodType: fhirtypes.CodeableConceptType = Field(
        None,
        alias="foodType",
        title=(
            "Concepts that are used to identify an entity that is ingested for "
            "nutritional purposes"
        ),
        description=(
            "The food type(s) (e.g. meats, all foods) that the texture "
            "modification applies to. This could be all foods types."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    modifier: fhirtypes.CodeableConceptType = Field(
        None,
        alias="modifier",
        title="Code to indicate how to alter the texture of the foods, e.g. pureed",
        description=(
            "Any texture modifications (for solid foods) that should be made, e.g. "
            "easy to chew, chopped, ground, and pureed."
        ),
        # if property is element of this resource.
        element_property=True,
    )
class NutritionOrderSupplement(backboneelement.BackboneElement):
    """Supplement components.

    Oral nutritional products given in order to add further nutritional value
    to the patient's diet.

    Disclaimer: any field name that ends with ``__ext`` doesn't form part of
    the Resource StructureDefinition; it is only used to enable the
    Extensibility feature for FHIR Primitive Data Types.
    """
    # Pydantic-level discriminator identifying this backbone element type.
    resource_type = Field("NutritionOrderSupplement", const=True)
    instruction: fhirtypes.String = Field(
        None,
        alias="instruction",
        title="Instructions or additional information about the oral supplement",
        description=(
            "Free text or additional instructions or information pertaining to the "
            "oral supplement."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    # Companion primitive-extension field for ``instruction``.
    instruction__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_instruction", title="Extension field for ``instruction``."
    )
    productName: fhirtypes.String = Field(
        None,
        alias="productName",
        title="Product or brand name of the nutritional supplement",
        description=(
            'The product or brand name of the nutritional supplement such as "Acme '
            'Protein Shake".'
        ),
        # if property is element of this resource.
        element_property=True,
    )
    # Companion primitive-extension field for ``productName``.
    productName__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_productName", title="Extension field for ``productName``."
    )
    quantity: fhirtypes.QuantityType = Field(
        None,
        alias="quantity",
        title="Amount of the nutritional supplement",
        description="The amount of the nutritional supplement to be given.",
        # if property is element of this resource.
        element_property=True,
    )
    schedule: ListType[fhirtypes.TimingType] = Field(
        None,
        alias="schedule",
        title="Scheduled frequency of supplement",
        description=(
            "The time period and frequency at which the supplement(s) should be "
            "given. The supplement should be given for the combination of all "
            "schedules if more than one schedule is present."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    type: fhirtypes.CodeableConceptType = Field(
        None,
        alias="type",
        title="Type of supplement product requested",
        description=(
            "The kind of nutritional supplement product required such as a high "
            "protein or pediatric clear liquid supplement."
        ),
        # if property is element of this resource.
        element_property=True,
    )
|
class GameError(Exception):
    """Base exception for game-rule violations.

    Fixes vs. original:
    - ``__format__`` now takes the required ``format_spec`` argument; the old
      single-argument signature made ``format(err)`` and f-strings raise
      ``TypeError: __format__() takes 1 positional argument but 2 were given``.
    - ``super().__init__(message)`` is called so ``str(err)`` yields the
      message instead of an empty string.
    """

    def __init__(self, message):
        super().__init__(message)
        self.message = message

    def __format__(self, format_spec):
        # Delegate to str formatting so width/alignment specs keep working.
        return format(self.message, format_spec)

    def __repr__(self):
        return f"{self.__class__.__name__}: {self.message}"
class IllegalMove(GameError):
    """Raised when a player attempts a move that violates the game rules."""
    pass
|
from D5_coffee_data import MENU, resources
# Simple interactive coffee-machine simulator driven by the MENU/resources
# data imported from D5_coffee_data.
profit = 0  # running total (in dollars) collected from successfully served drinks
is_on = True  # main-loop flag; typing "off" at the prompt clears it
def is_reource_sufficient(order_ing, supply=None):
    """Return True when every ingredient needed for the order is in stock.

    Args:
        order_ing: mapping of ingredient name -> required amount.
        supply: mapping of ingredient name -> available amount; defaults to
            the machine's global ``resources`` dict, so existing callers are
            unaffected.

    Fix: the original compared with ``>=``, so an order that needed *exactly*
    the remaining stock was wrongly rejected; an order is only unservable
    when it needs strictly more than what is available.
    """
    if supply is None:
        supply = resources
    for item in order_ing:
        if order_ing[item] > supply[item]:
            print(f"Sorry there is not enough {item}.\n")
            return False
    return True
# TODO 5:Process coins
def process_coin():
    """Prompt for each coin denomination and return the total inserted, in dollars."""
    print("Please insert coins.\n")
    denominations = (
        ("how many quarters?: ", 0.25),
        ("how many dimes?: ", 0.1),
        ("how many nickles?: ", 0.05),
        ("how many pennies?: ", 0.01),
    )
    # sum() evaluates left-to-right, so the prompts appear in the same order.
    return sum(int(input(prompt)) * value for prompt, value in denominations)
# TODO 6:Check transaction successfull
def is_transation_succesful(mr, cost):
    """Return True (and bank the cost into the global ``profit``) when payment covers the drink.

    Args:
        mr: money received from the user, in dollars.
        cost: price of the chosen drink, in dollars.

    Fixes vs. original: exact payment (``mr == cost``) was rejected because
    the check used ``>``; the failure branch now returns an explicit False
    instead of falling through to None; the "reunded" typo in the user-facing
    refund message is corrected.
    """
    if mr >= cost:
        change = round(mr - cost, 2)
        print(f"\nHere is ${change} $ in change.\n")
        global profit
        profit += cost
        return True
    else:
        print("Sorry there is not enough money. Money refunded.")
        return False
# TODO 7: Make Coffee.
def make_coffee(drink_name, order_ingredients, supply=None):
    """Deduct the order's ingredients from the supply and announce the drink.

    Args:
        drink_name: label echoed back to the user.
        order_ingredients: mapping of ingredient name -> amount to consume.
        supply: mutable mapping to deduct from; defaults to the global
            ``resources`` dict so existing callers are unaffected
            (generalization: lets the machine serve from an explicit pantry).
    """
    if supply is None:
        supply = resources
    for item in order_ingredients:
        supply[item] -= order_ingredients[item]
    print(f"Here is your {drink_name}.\n")
if __name__ == "__main__":
    # Serve drinks until the user types "off".
    while is_on:
        user_input = input("What would you like? (espresso/latte/cappuccino): ")
        if user_input == "off":
            is_on = False
        elif user_input == "report":
            # Show remaining stock of every ingredient plus money earned.
            for k, v in resources.items():
                print(k, v)
            print(f"Money: ${profit}")
        elif user_input not in MENU:
            # Fix: an unknown drink name previously raised KeyError on
            # MENU[user_input] and crashed the machine.
            print(f"Sorry, '{user_input}' is not on the menu.\n")
        else:
            drink = MENU[user_input]
            # drink["ingredients"] maps ingredient name -> required amount.
            if is_reource_sufficient(drink["ingredients"]):
                payment = process_coin()
                if is_transation_succesful(payment, drink["cost"]):
                    make_coffee(user_input, drink["ingredients"])
from django import forms
from multiselectfield import MultiSelectFormField
from tfg_webapp.models import DataFile, ReportSettings
class DataFileForm(forms.ModelForm):
    """Upload form exposing only the ``data_file`` field of ``DataFile``."""
    class Meta:
        model = DataFile
        fields = ('data_file',)
        # Blank label so the template renders just the bare file-input widget.
        labels = {
            'data_file': ''
        }
class ColumnsForm(forms.Form):
    """Form that lets the user choose which statistics columns go in the report."""

    # Whitelist of column names the report generator understands.
    _COLUMNS = {'Mean', 'Std', 'Max', 'Min', 'MAGE'}

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The multi-select field is built dynamically from the model's choices.
        self.fields['columns'] = MultiSelectFormField(choices=ReportSettings.COLUMN_TYPES)

    def is_valid(self):
        if not super().is_valid():
            return False
        # Every selected column must belong to the supported whitelist.
        return set(self.cleaned_data['columns']).issubset(self._COLUMNS)
|
# NOTE(review): "294" is not a valid IPv4 octet (must be <= 255) — this looks
# like a placeholder address; verify before deploying.
Server_ip = "182.294.242.181"
# SECURITY NOTE(review): a bot token is hard-coded in source; rotate it and
# load it from an environment variable or config file instead.
Token = '121899714:AAEXTAOSsuyFIb6Nydku3GhBM9sJtvstn8M'
The_group_id_you_wanna_forward = 0  # change it
# Refuse to start until a real group id has been configured.
if The_group_id_you_wanna_forward == 0:
    print(f"""
You have to set the The_group_id_you_wanna_forward varable to use this program.
""")
    exit()
# Outbound side of the relay: king_chat client that receives forwarded text.
from king_chat import Client
client = Client(name="telegram", ip=Server_ip, port=5920)
# Inbound side of the relay: Telegram bot API (python-telegram-bot v12-style).
from telegram.ext import Updater
from telegram import Bot
updater = Updater(token=Token)
dispatcher = updater.dispatcher
# Chat id of the last private sender; used as reply target when no group is set.
last_user_id = None
def format_msg(user_name, text):
    """Build the relay payload: sender name, three newlines, then the trimmed text."""
    body = text.strip(' \n')
    return f'{user_name}:\n\n\n{body}'
def echo(bot, update):
    """Telegram message handler: forward matching messages to the chat server.

    If a target group is configured, only messages from that group are
    forwarded; otherwise every message is forwarded and the sender's chat id
    is remembered so replies can be routed back.
    """
    global last_user_id
    sender = update.message.from_user.full_name
    print('Your goup id: ', update.message.chat_id)
    if The_group_id_you_wanna_forward == 0:
        last_user_id = update.message.chat_id
        client.send(format_msg(sender, update.message.text))
    elif update.message.chat_id == The_group_id_you_wanna_forward:
        client.send(format_msg(sender, update.message.text))
# Register ``echo`` for every plain-text Telegram message.
from telegram.ext import MessageHandler, Filters
echo_handler = MessageHandler(Filters.text, echo)
dispatcher.add_handler(echo_handler)
@client.on_received
def on_received(protocol, text):
    """Chat-server callback: push an incoming message back into Telegram.

    Sends to the configured group when one is set, otherwise to the last
    private sender seen by ``echo`` (if any).

    Fixes vs. original: ``last_user_id != None`` replaced with the idiomatic
    ``is not None``; the needless ``global Token, Bot`` and duplicate
    ``global The_group_id_you_wanna_forward`` declarations are dropped
    (module globals are readable without them).
    """
    # NOTE(review): a fresh Bot is built per message, as in the original;
    # hoisting it to module level would avoid repeated construction.
    my_bot = Bot(token=Token)
    print(text)
    if The_group_id_you_wanna_forward != 0:
        my_bot.send_message(chat_id=The_group_id_you_wanna_forward, text=text)
    elif last_user_id is not None:
        my_bot.send_message(chat_id=last_user_id, text=text)
# Start the relay: chat client in the background, then Telegram long polling.
client.start(wait=False)
updater.start_polling()
|
from setuptools import setup, find_packages
def load_requirements(path):
    """Parse a pip requirements file, skipping blank lines and ``#`` comments."""
    requirements = []
    with open(path) as handle:
        for raw_line in handle:
            entry = raw_line.strip()
            if entry and not entry.startswith('#'):
                requirements.append(entry)
    return requirements
# Parsed at import time: requirements.txt must sit next to this setup.py.
requirements = load_requirements('requirements.txt')
setup(
    name='sru_lm',
    version='0.1.2',
    packages=find_packages(),
    install_requires=requirements,
)
|
import cv2 as cv
import numpy as np
# Demo script: split a BGR image into its colour channels, display each
# channel as a coloured image, then re-merge and display the original.
# NOTE(review): input path is hard-coded to a local Windows directory.
img = cv.imread("D:/Programming/Python_Projects/Computer_Vision/ROV/FishSize/Source/" + "samplefromvideo_1.png")
cv.imshow('Park', img)
# All-zeros single-channel image used to blank out the other two channels.
blank = np.zeros(img.shape[:2], dtype='uint8')
# OpenCV stores images as BGR, so the split order is blue, green, red.
b,g,r = cv.split(img)
blue = cv.merge([b,blank,blank])
green = cv.merge([blank,g,blank])
red = cv.merge([blank,blank,r])
cv.imshow('Blue', blue)
cv.imshow('Green', green)
cv.imshow('Red', red)
# The full image is 3-channel; each split channel is single-channel 2-D.
print(img.shape)
print(b.shape)
print(g.shape)
print(r.shape)
# Re-merging the untouched channels reproduces the original image.
merged = cv.merge([b,g,r])
cv.imshow('Merged Image', merged)
cv.waitKey(0)
import torch
from scipy.optimize import nnls
def Fixed_Weight_Greedy_Parallel(A, b, val_set_size, nnz=None, device="cpu"):
    r'''Greedily approximate min_x ||Ax - (n / val_set_size) * b||_2 s.t. x_i \in \{0,1\}.

    At each step the not-yet-selected column of ``A`` whose addition minimizes
    the norm of the new residual is switched on (weight fixed to 1).

    Args:
        A: design matrix of size (d, n)
        b: measurement vector of length d
        val_set_size: scaling divisor; the greedy target is b * n / val_set_size
        nnz: maximum number of nonzero coefficients (if None set to n)
        device: torch device for the solution and accumulators

    Returns:
        0/1 vector of length n with exactly ``nnz`` ones

    Changes vs. original: dead commented-out early-exit block removed, unused
    loop index renamed to ``_``; behavior is unchanged.
    '''
    d, n = A.shape
    if nnz is None:
        nnz = n
    x = torch.zeros(n, device=device)
    # Scaled target vector b * n / val_set_size.
    b_k_val = (n / val_set_size) * b
    # Running value of A @ x for the columns selected so far.
    memoized_Ax = torch.zeros(d, device=device)
    for _ in range(nnz):
        resid = memoized_Ax - b_k_val
        # Residual norm after hypothetically adding each column:
        # resid[:, None] broadcasts (d,) against the (d, n) columns of A.
        gain_norms = (A + resid[:, None]).norm(dim=0)
        # Restrict the greedy choice to columns not already selected.
        zero_x = torch.nonzero(x == 0)
        argmin = torch.argmin(gain_norms[zero_x])
        actual_index = zero_x[argmin].item()
        # Commit the best column: weight 1, update the memoized product.
        x[actual_index] = 1
        memoized_Ax = memoized_Ax + A[:, actual_index]
    return x
# NOTE: Standard Algorithm, e.g. Tropp, ``Greed is Good: Algorithmic Results for Sparse Approximation," IEEE Trans. Info. Theory, 2004.
def OrthogonalMP_REG_Parallel(A, b, tol=1E-4, nnz=None, positive=False, lam=1, device="cpu"):
    '''Approximately solve min_x |x|_0 s.t. Ax = b via Orthogonal Matching Pursuit.

    Standard algorithm, e.g. Tropp, "Greed is Good: Algorithmic Results for
    Sparse Approximation," IEEE Trans. Info. Theory, 2004, with Tikhonov
    regularization (``lam``) on the least-squares subproblem.

    Args:
        A: design matrix of size (d, n)
        b: measurement vector of length d
        tol: solver tolerance on the relative residual norm
        nnz: maximum number of nonzero coefficients (if None set to n)
        positive: only allow positive nonzero coefficients (solved via nnls)
        lam: L2 regularization weight added to the Gram matrix
        device: torch device for the solution and intermediates

    Returns:
        vector of length n

    Fixes vs. original: ``torch.lstsq`` (removed in PyTorch >= 1.13) replaced
    by ``torch.linalg.lstsq`` with the equivalent argument order, and the
    nnls result is placed on ``device`` instead of a hard-coded ``.cuda()``
    call that crashed on CPU-only machines.
    '''
    AT = torch.transpose(A, 0, 1)
    d, n = A.shape
    if nnz is None:
        nnz = n
    x = torch.zeros(n, device=device)
    resid = b.detach().clone()
    normb = b.norm().item()
    indices = []
    for i in range(nnz):
        # Converged once the relative residual norm drops below tol.
        if resid.norm().item() / normb < tol:
            break
        projections = torch.matmul(AT, resid)
        if positive:
            index = torch.argmax(projections)
        else:
            index = torch.argmax(torch.abs(projections))
        if index in indices:
            # Best atom already selected: no further progress is possible.
            break
        indices.append(index)
        if len(indices) == 1:
            A_i = A[:, index]
            x_i = projections[index] / torch.dot(A_i, A_i).view(-1)
            A_i = A[:, index].view(1, -1)
        else:
            A_i = torch.cat((A_i, A[:, index].view(1, -1)), dim=0)
            # Regularized Gram matrix of the selected atoms.
            temp = torch.matmul(A_i, torch.transpose(A_i, 0, 1)) + lam * torch.eye(A_i.shape[0], device=device)
            # If constrained to be positive, use nnls. Otherwise, solve the
            # regularized least-squares system with torch.linalg.lstsq.
            if positive:
                x_i, _ = nnls(temp.cpu().numpy(), torch.matmul(A_i, b).view(-1, 1).cpu().numpy()[:, 0])
                x_i = torch.as_tensor(x_i, dtype=b.dtype, device=device)
            else:
                # torch.linalg.lstsq(temp, rhs) solves temp @ x = rhs — the
                # same solution the removed torch.lstsq(rhs, temp) returned.
                x_i = torch.linalg.lstsq(temp, torch.matmul(A_i, b).view(-1, 1)).solution
        resid = b - torch.matmul(torch.transpose(A_i, 0, 1), x_i).view(-1)
        x_i = x_i.view(-1)
    for i, index in enumerate(indices):
        try:
            x[index] += x_i[i]
        except IndexError:
            x[index] += x_i
    return x
"""
Default settings for the Richie courses app.
If you use Django Configuration for your settings, you can use our mixin to import these
default settings:
```
from configurations import Configuration
from richie.apps.courses.settings.mixins import RichieCoursesConfigurationMixin
class MyConfiguration(RichieCoursesConfigurationMixin, Configuration):
...
```
Otherwise, you can just use the usual Django pattern in your settings.py file:
```
from richie.apps.courses.settings import *
```
"""
from django.utils.translation import gettext_lazy as _
# Associated LMS backends
# Empty by default; deployments append dicts describing each connected LMS.
RICHIE_LMS_BACKENDS = []
# Easy Thumbnails
# Processor pipeline applied in order when generating thumbnails.
THUMBNAIL_PROCESSORS = (
    "easy_thumbnails.processors.colorspace",
    "easy_thumbnails.processors.autocrop",
    "filer.thumbnail_processors.scale_and_crop_with_subject_location",
    "easy_thumbnails.processors.filters",
    "easy_thumbnails.processors.background",
)
# Django CMS
# (template path, human-readable name) pairs selectable in the page admin.
CMS_TEMPLATES = (
    ("courses/cms/course_detail.html", _("Course page")),
    ("courses/cms/organization_list.html", _("Organization list")),
    ("courses/cms/organization_detail.html", _("Organization page")),
    ("courses/cms/category_list.html", _("Category list")),
    ("courses/cms/category_detail.html", _("Category page")),
    ("courses/cms/blogpost_list.html", _("Blog post list")),
    ("courses/cms/blogpost_detail.html", _("Blog post page")),
    ("courses/cms/person_detail.html", _("Person page")),
    ("courses/cms/person_list.html", _("Person list")),
    ("courses/cms/program_detail.html", _("Program page")),
    ("courses/cms/program_list.html", _("Program list")),
    ("search/search.html", _("Search")),
    ("richie/child_pages_list.html", _("List of child pages")),
    ("richie/homepage.html", _("Homepage")),
    ("richie/single_column.html", _("Single column")),
)
# Per-placeholder plugin restrictions: each entry maps a placeholder (static
# name, or "template path + placeholder name") to its allowed plugins,
# parent/child nesting rules and per-plugin instance limits.
CMS_PLACEHOLDER_CONF = {
    # -- Static Placeholders
    # Footer
    "footer": {
        "name": _("Footer"),
        "plugins": ["NestedItemPlugin", "LinkPlugin"],
        "NestedItemPlugin": ["LinkPlugin"],
    },
    "static_blogpost_headline": {
        "name": _("Static headline"),
        "plugins": ["SectionPlugin", "CKEditorPlugin"],
        "child_classes": {"SectionPlugin": ["CKEditorPlugin"]},
    },
    # -- Page Placeholders
    # Homepage
    "richie/homepage.html maincontent": {
        "name": _("Main content"),
        "plugins": ["LargeBannerPlugin", "SectionPlugin"],
        "child_classes": {
            "SectionPlugin": [
                "BlogPostPlugin",
                "CategoryPlugin",
                "CoursePlugin",
                "CKEditorPlugin",
                "GlimpsePlugin",
                "LinkPlugin",
                "NestedItemPlugin",
                "OrganizationsByCategoryPlugin",
                "OrganizationPlugin",
                "PersonPlugin",
                "ProgramPlugin",
                "SectionPlugin",
            ],
            "NestedItemPlugin": ["CategoryPlugin"],
        },
    },
    # Single column page
    "richie/single_column.html maincontent": {
        "name": _("Main content"),
        "excluded_plugins": ["CKEditorPlugin", "GoogleMapPlugin"],
        "parent_classes": {
            "BlogPostPlugin": ["SectionPlugin"],
            "CategoryPlugin": ["SectionPlugin"],
            "CoursePlugin": ["SectionPlugin"],
            "GlimpsePlugin": ["SectionPlugin"],
            "OrganizationPlugin": ["SectionPlugin"],
            "OrganizationsByCategoryPlugin": ["SectionPlugin"],
            "PersonPlugin": ["SectionPlugin"],
            "ProgramPlugin": ["SectionPlugin"],
        },
        "child_classes": {
            "SectionPlugin": [
                "BlogPostPlugin",
                "CategoryPlugin",
                "CoursePlugin",
                "GlimpsePlugin",
                "LinkPlugin",
                "NestedItemPlugin",
                "OrganizationsByCategoryPlugin",
                "OrganizationPlugin",
                "PersonPlugin",
                "ProgramPlugin",
            ],
            "NestedItemPlugin": ["NestedItemPlugin", "LinkPlugin"],
        },
    },
    # Course detail
    "courses/cms/course_detail.html course_cover": {
        "name": _("Cover"),
        "plugins": ["SimplePicturePlugin"],
        "limits": {"SimplePicturePlugin": 1},
    },
    "courses/cms/course_detail.html course_introduction": {
        "name": _("Catch phrase"),
        "plugins": ["PlainTextPlugin"],
        "limits": {"PlainTextPlugin": 1},
    },
    "courses/cms/course_detail.html course_teaser": {
        "name": _("Teaser"),
        "plugins": ["VideoPlayerPlugin"],
        "limits": {"VideoPlayerPlugin": 1},
    },
    "courses/cms/course_detail.html course_description": {
        "name": _("About the course"),
        "plugins": ["CKEditorPlugin"],
        "limits": {"CKEditorPlugin": 1},
    },
    "courses/cms/course_detail.html course_skills": {
        "name": _("What you will learn"),
        "plugins": ["CKEditorPlugin"],
    },
    "courses/cms/course_detail.html course_format": {
        "name": _("Format"),
        "plugins": ["CKEditorPlugin"],
    },
    "courses/cms/course_detail.html course_prerequisites": {
        "name": _("Prerequisites"),
        "plugins": ["CKEditorPlugin"],
    },
    "courses/cms/course_detail.html course_team": {
        "name": _("Team"),
        "plugins": ["PersonPlugin"],
    },
    "courses/cms/course_detail.html course_plan": {
        "name": _("Plan"),
        "plugins": ["NestedItemPlugin"],
        "child_classes": {"NestedItemPlugin": ["NestedItemPlugin"]},
    },
    "courses/cms/course_detail.html course_information": {
        "name": _("Complementary information"),
        "plugins": ["SectionPlugin"],
        "parent_classes": {
            "CKEditorPlugin": ["SectionPlugin"],
            "SimplePicturePlugin": ["SectionPlugin"],
            "GlimpsePlugin": ["SectionPlugin"],
        },
        "child_classes": {
            "SectionPlugin": ["CKEditorPlugin", "SimplePicturePlugin", "GlimpsePlugin"]
        },
    },
    "courses/cms/course_detail.html course_license_content": {
        "name": _("License for the course content"),
        "plugins": ["LicencePlugin"],
        "limits": {"LicencePlugin": 1},
    },
    "courses/cms/course_detail.html course_license_participation": {
        "name": _("License for the content created by course participants"),
        "plugins": ["LicencePlugin"],
        "limits": {"LicencePlugin": 1},
    },
    "courses/cms/course_detail.html course_categories": {
        "name": _("Categories"),
        "plugins": ["CategoryPlugin"],
    },
    "courses/cms/course_detail.html course_icons": {
        "name": _("Icon"),
        "plugins": ["CategoryPlugin"],
        "limits": {"CategoryPlugin": 1},
    },
    "courses/cms/course_detail.html course_organizations": {
        "name": _("Organizations"),
        "plugins": ["OrganizationPlugin"],
    },
    "courses/cms/course_detail.html course_assessment": {
        "name": _("Assessment and Certification"),
        "plugins": ["CKEditorPlugin"],
    },
    # Organization detail
    "courses/cms/organization_detail.html banner": {
        "name": _("Banner"),
        "plugins": ["SimplePicturePlugin"],
        "limits": {"SimplePicturePlugin": 1},
    },
    "courses/cms/organization_detail.html logo": {
        "name": _("Logo"),
        "plugins": ["SimplePicturePlugin"],
        "limits": {"SimplePicturePlugin": 1},
    },
    "courses/cms/organization_detail.html categories": {
        "name": _("Categories"),
        "plugins": ["CategoryPlugin"],
    },
    "courses/cms/organization_detail.html description": {
        "name": _("Description"),
        "plugins": ["CKEditorPlugin"],
        "limits": {"CKEditorPlugin": 1},
    },
    # Category detail
    "courses/cms/category_detail.html banner": {
        "name": _("Banner"),
        "plugins": ["SimplePicturePlugin"],
        "limits": {"SimplePicturePlugin": 1},
    },
    "courses/cms/category_detail.html logo": {
        "name": _("Logo"),
        "plugins": ["SimplePicturePlugin"],
        "limits": {"SimplePicturePlugin": 1},
    },
    "courses/cms/category_detail.html icon": {
        "name": _("Icon"),
        "plugins": ["SimplePicturePlugin"],
        "limits": {"SimplePicturePlugin": 1},
    },
    "courses/cms/category_detail.html description": {
        "name": _("Description"),
        "plugins": ["CKEditorPlugin"],
        "limits": {"CKEditorPlugin": 1},
    },
    # Person detail
    "courses/cms/person_detail.html categories": {
        "name": _("Categories"),
        "plugins": ["CategoryPlugin"],
    },
    "courses/cms/person_detail.html portrait": {
        "name": _("Portrait"),
        "plugins": ["SimplePicturePlugin"],
        "limits": {"SimplePicturePlugin": 1},
    },
    "courses/cms/person_detail.html bio": {
        "name": _("Bio"),
        "plugins": ["PlainTextPlugin"],
        "limits": {"PlainTextPlugin": 1},
    },
    "courses/cms/person_detail.html maincontent": {
        "name": _("Main Content"),
        "plugins": ["CKEditorPlugin", "PersonPlugin", "SectionPlugin", "GlimpsePlugin"],
        "child_classes": {
            "SectionPlugin": ["CKEditorPlugin", "GlimpsePlugin", "PersonPlugin"]
        },
        "limits": {"CKEditorPlugin": 1},
    },
    "courses/cms/person_detail.html organizations": {
        "name": _("Organizations"),
        "plugins": ["OrganizationPlugin"],
    },
    # Blog page detail
    "courses/cms/blogpost_detail.html author": {
        "name": _("Author"),
        "plugins": ["PersonPlugin"],
        "limits": {"PersonPlugin": 1},
    },
    "courses/cms/blogpost_detail.html categories": {
        "name": _("Categories"),
        "plugins": ["CategoryPlugin"],
    },
    "courses/cms/blogpost_detail.html cover": {
        "name": _("Cover"),
        "plugins": ["SimplePicturePlugin"],
        "limits": {"SimplePicturePlugin": 1},
    },
    "courses/cms/blogpost_detail.html excerpt": {
        "name": _("Excerpt"),
        "plugins": ["PlainTextPlugin"],
        "limits": {"PlainTextPlugin": 1},
    },
    "courses/cms/blogpost_detail.html body": {
        "name": _("Body"),
        "excluded_plugins": ["CKEditorPlugin", "GoogleMapPlugin"],
    },
    "courses/cms/blogpost_detail.html headline": {
        "name": _("Headline"),
        "plugins": ["SectionPlugin", "CKEditorPlugin"],
        "child_classes": {"SectionPlugin": ["CKEditorPlugin"]},
    },
    # Program page detail
    "courses/cms/program_detail.html program_cover": {
        "name": _("Cover"),
        "plugins": ["SimplePicturePlugin"],
        "limits": {"SimplePicturePlugin": 1},
    },
    "courses/cms/program_detail.html program_excerpt": {
        "name": _("Excerpt"),
        "plugins": ["PlainTextPlugin"],
        "limits": {"PlainTextPlugin": 1},
    },
    "courses/cms/program_detail.html program_body": {
        "name": _("Body"),
        "plugins": ["CKEditorPlugin"],
        "limits": {"CKEditorPlugin": 1},
    },
    "courses/cms/program_detail.html program_courses": {
        "name": _("Courses"),
        "plugins": ["CoursePlugin"],
    },
    "courses/cms/program_list.html maincontent": {
        "name": _("Main content"),
        "plugins": ["SectionPlugin"],
        "child_classes": {
            "SectionPlugin": [
                "BlogPostPlugin",
                "CategoryPlugin",
                "CoursePlugin",
                "GlimpsePlugin",
                "LinkPlugin",
                "OrganizationPlugin",
                "OrganizationsByCategoryPlugin",
                "PersonPlugin",
                "CKEditorPlugin",
                "SectionPlugin",
                "NestedItemPlugin",
            ],
            "NestedItemPlugin": ["CategoryPlugin"],
        },
    },
}
# Main CKEditor configuration
CKEDITOR_SETTINGS = {
    "language": "{{ language }}",
    "skin": "moono-lisa",
    "toolbarCanCollapse": False,
    "contentsCss": "/static/richie/css/ckeditor.css",
    # Enabled showblocks as default behavior
    "startupOutlineBlocks": True,
    # Enable some plugins
    # 'extraPlugins': 'codemirror',
    # Disable element filter to enable full HTML5, also this will let
    # append any code, even bad syntax and malicious code, so be careful
    "removePlugins": "stylesheetparser",
    "allowedContent": True,
    # Image plugin options
    "image_prefillDimensions": False,
    # Justify text using shortand class names
    "justifyClasses": ["text-left", "text-center", "text-right"],
    # Default toolbar configurations for djangocms_text_ckeditor
    "toolbar": "CMS",
    "toolbar_CMS": [
        ["Undo", "Redo"],
        ["cmsplugins", "-", "ShowBlocks"],
        ["Format", "Styles"],
        ["RemoveFormat"],
        ["Maximize"],
        "/",
        ["Bold", "Italic", "Underline", "-", "Subscript", "Superscript"],
        ["JustifyLeft", "JustifyCenter", "JustifyRight"],
        ["Link", "Unlink"],
        ["NumberedList", "BulletedList", "-", "HorizontalRule"],
        ["Source"],
    ],
}
# Share the same configuration for djangocms_text_ckeditor field and derived
# CKEditor widgets/fields
CKEDITOR_SETTINGS["toolbar_HTMLField"] = CKEDITOR_SETTINGS["toolbar_CMS"]
# CKEditor configuration for basic formatting
CKEDITOR_BASIC_CONFIGURATION = {
    "language": "{{ language }}",
    "skin": "moono-lisa",
    "toolbarCanCollapse": False,
    "contentsCss": "/static/css/ckeditor.css",
    # Only enable following tag definitions
    "allowedContent": ["p", "b", "i", "a[href]"],
    # Enabled showblocks as default behavior
    "startupOutlineBlocks": True,
    # Default toolbar configurations for djangocms_text_ckeditor
    "toolbar": "HTMLField",
    "toolbar_HTMLField": [["Undo", "Redo"], ["Bold", "Italic"], ["Link", "Unlink"]],
}
# CKEditor configuration for formatting limited to:
# paragraph, bold, italic and numbered or bulleted lists.
CKEDITOR_LIMITED_CONFIGURATION = {
    "language": "{{ language }}",
    "skin": "moono-lisa",
    "toolbarCanCollapse": False,
    "contentsCss": "/static/css/ckeditor.css",
    # Only enable following tag definitions
    "allowedContent": ["p", "b", "i", "ol", "ul", "li"],
    # Enabled showblocks as default behavior
    "startupOutlineBlocks": True,
    # Default toolbar configurations for djangocms_text_ckeditor
    "toolbar": "HTMLField",
    "toolbar_HTMLField": [
        ["Undo", "Redo"],
        ["Bold", "Italic"],
        ["Link", "Unlink"],
        ["NumberedList", "BulletedList", "-"],
    ],
}
# CKEditor configuration for formatting section title:
# only bold entity
CKEDITOR_INLINE_BOLD_CONFIGURATION = {
    "language": "{{ language }}",
    "skin": "moono-lisa",
    "toolbarCanCollapse": False,
    "contentsCss": "/static/css/ckeditor.css",
    # Only enable following tag definitions
    "allowedContent": ["strong"],
    # Block commands which adds break lines (Enter & Shift + Enter)
    # Enter Key Code = 13
    # CKEDITOR.SHIFT + Enter = 2228224 + 13 = 2228237
    "blockedKeystrokes": [13, 2228237],
    "keystrokes": [[13, None], [2228237, None]],
    # Enabled showblocks as default behavior
    "startupOutlineBlocks": True,
    # Default toolbar configurations for djangocms_text_ckeditor
    "toolbar_HTMLField": [["Undo", "Redo"], ["Bold"]],
    # enterMode 2 = ENTER_BR: Enter inserts <br> rather than opening a <p>.
    "enterMode": 2,
    "autoParagraph": False,
    "resize_enabled": False,
    "height": 68,
}
# Additional LinkPlugin templates. Note how choice value is just a keyword
# instead of full template path. Value is used inside a path formatting
# such as "templates/djangocms_link/VALUE/link.html"
DJANGOCMS_LINK_TEMPLATES = [("button-caesura", _("Button caesura"))]
DJANGOCMS_VIDEO_TEMPLATES = [("full-width", _("Full width"))]
# Richie plugins
# Per-placeholder character limits for the PlainText plugin.
RICHIE_PLAINTEXT_MAXLENGTH = {"course_introduction": 200, "bio": 150, "excerpt": 240}
# Maps groups of placeholders to the named CKEditor configuration (and an
# optional max_length) that the SimpleText plugin should use there.
RICHIE_SIMPLETEXT_CONFIGURATION = [
    {
        "placeholders": ["course_skills", "course_plan"],
        "ckeditor": "CKEDITOR_LIMITED_CONFIGURATION",
    },
    {
        "placeholders": ["course_description"],
        "ckeditor": "CKEDITOR_LIMITED_CONFIGURATION",
        "max_length": 1200,
    },
    {
        "placeholders": ["maincontent"],
        "ckeditor": "CKEDITOR_SETTINGS",
        "max_length": 5000,
    },
    {
        "placeholders": ["course_assessment", "course_format", "course_prerequisites"],
        "ckeditor": "CKEDITOR_BASIC_CONFIGURATION",
    },
]
# Responsive image presets (src/srcset/sizes) used by the SimplePicture plugin.
RICHIE_SIMPLEPICTURE_PRESETS = {
    # Formatting images for the courses search index
    "cover": {
        "src": {"size": (300, 170), "crop": "smart"},
        "srcset": [
            {
                "options": {"size": (300, 170), "crop": "smart", "upscale": True},
                "descriptor": "300w",
            },
            {
                "options": {"size": (600, 340), "crop": "smart", "upscale": True},
                "descriptor": "600w",
            },
            {
                "options": {"size": (900, 560), "crop": "smart", "upscale": True},
                "descriptor": "900w",
            },
        ],
        "sizes": "300px",
    },
    "icon": {
        "src": {"size": (60, 60), "crop": "smart"},
        "srcset": [
            {
                "options": {"size": (60, 60), "crop": "smart", "upscale": True},
                "descriptor": "60w",
            },
            {
                "options": {"size": (120, 120), "crop": "smart", "upscale": True},
                "descriptor": "120w",
            },
            {
                "options": {"size": (180, 180), "crop": "smart", "upscale": True},
                "descriptor": "180w",
            },
        ],
        "sizes": "60px",
    },
}
|
from glob import glob
import os

# Root directory of the CPG-BO experiment whose PNG snapshots are purged.
# Bug fix: the original reused the name `path` for both this directory and
# the loop variable, shadowing the constant after the first iteration.
OUTPUT_DIR = "/home/gongjinlan/projects/revolve/output/cpg_bo/main_1559644358-BO-gecko7/"
print(OUTPUT_DIR)
# Every PNG exactly one directory level below the experiment directory.
png_paths = glob(OUTPUT_DIR + "*/*.png")
print(len(png_paths))
for png_path in png_paths:
    os.remove(png_path)
|
import tensorflow as tf
from keras.models import load_model
from flask import Flask, render_template, request
import numpy as np
import base64
import json
import logging
from scipy.misc import imresize
import imageio
import warnings
# Silence noisy TensorFlow INFO/WARNING output and generic Python warnings.
tf.get_logger().setLevel(logging.ERROR)
warnings.filterwarnings("ignore")
app = Flask(__name__)
# Maps a predicted digit class index (0-9) to its English word for display.
NUMBER = {0: "Zero", 1: "One", 2: "Two", 3: "Three", 4: "Four", 5: "Five", 6: "Six",
          7: "Seven", 8: "Eight", 9: "Nine"}
@app.route("/", methods=["GET", "POST"])
def ready():
    """Serve the drawing page (GET) and classify a submitted digit (POST).

    POST expects form fields "payload" (a data-URL PNG of the drawing) and
    "net" (the network choice echoed back to the template). The image is
    decoded, resized to 28x28 grayscale, inverted, brightened, normalized,
    and fed to the Keras model; the rendered page shows the prediction.
    """
    if request.method == "GET":
        return render_template("index1.html")
    if request.method == "POST":
        # Strip the "data:image/png;base64," prefix from the data URL.
        data = request.form["payload"].split(",")[1]
        net = request.form["net"]
        graph = tf.get_default_graph()
        img = base64.b64decode(data)
        # Round-trip through a temp file so imageio can decode the PNG.
        with open('temp.png', 'wb') as output:
            output.write(img)
        x = imageio.imread('temp.png', pilmode='L')
        # NOTE(review): scipy.misc.imresize is deprecated/removed in modern
        # SciPy — migration attempts are documented below.
        x = imresize(x, (28, 28))
        '''
        failed to remove imresize warning
        tried np.array(Image.fromarray(x).resize((28, 28)))
        tried np.array(Image.fromarray(x, mode="Lc").resize((28, 28)))
        tried matplotlib, rgb2gray
        '''
        x = np.expand_dims(x, axis=0)
        x = x.reshape(28, 28, 1)
        # Canvas drawings are dark-on-light; MNIST is light-on-dark.
        x = np.invert(x)
        # brighten the image by 60%
        # NOTE(review): inner loop uses range(len(x)) rather than
        # range(len(x[i])); equivalent only because the image is square
        # (28x28) — confirm before changing the input size.
        for i in range(len(x)):
            for j in range(len(x)):
                if x[i][j] > 50:
                    x[i][j] = min(255, x[i][j] + x[i][j] * 0.60)
        # normalize the values between 0 and 1
        x = np.interp(x, [0, 255], [0, 1])
        # NOTE(review): the model is loaded on every request — hoisting the
        # load to module scope would avoid repeated disk reads; confirm it
        # is compatible with the graph handling before changing.
        model = load_model("./models/augmented_data_best_model.h5")
        with graph.as_default():
            val = model.predict([[x]])
            pred = NUMBER[np.argmax(val)]
        classes = ["Zero", "One", "Two", "Three", "Four", "Five", "Six",
                   "Seven", "Eight", "Nine"]
        print(pred)
        print(list(val[0]))
        return render_template("index1.html", preds=list(val[0]), classes=json.dumps(classes), chart=True, putback=request.form["payload"], net=net, num=pred)
if __name__ == '__main__':
    # Development server only. NOTE(review): threaded=False presumably
    # avoids cross-thread TF1 graph/session issues — confirm before enabling.
    app.run(host="127.0.0.1", port=8080, debug=True, threaded=False)
|
"""
Given an unsorted array, find the maximum difference between the successive elements in its sorted form.
Return 0 if the array contains less than 2 elements.
Example 1:
Input: [3,6,9,1]
Output: 3
Explanation: The sorted form of the array is [1,3,6,9], either
(3,6) or (6,9) has the maximum difference 3.
Example 2:
Input: [10]
Output: 0
Explanation: The array contains less than 2 elements, therefore return 0.
Note:
You may assume all elements in the array are non-negative integers and fit in the 32-bit signed integer range.
Try to solve it in linear time/space.
"""
class Solution:
    def maximumGap(self, nums):
        """Return the maximum difference between successive elements of the
        sorted form of *nums*.

        :type nums: List[int]  (non-negative 32-bit ints, unsorted)
        :rtype: int -- 0 when fewer than 2 elements, else the largest
            adjacent gap in sorted order.

        Method: sort a copy and scan adjacent pairs (O(n log n)).
        #TODO: Solve in linear time/space (bucket / pigeonhole approach).
        Fixes vs. original: removed leftover debug print, and sorts a copy
        instead of mutating the caller's list in place.
        """
        ordered = sorted(nums)
        best = 0
        for i in range(1, len(ordered)):
            gap = ordered[i] - ordered[i - 1]
            if gap > best:
                best = gap
        return best
"""DESIGM Clustering Algorithm"""
# Authors: Jeffrey Wang
# License: BSD 3 clause
import numpy as np
from ._utils import format_array, create_random_state
class DSIGM:
	"""
	A Clustering Model using the DSIGM Clustering Algorithm.
	DSIGM - Density-sensitive Self-stabilization of
	Independent Gaussian Mixtures.
	Fits a self-stabilized number of Gaussian components
	and groups them into clusters in a density sensitive manner.
	Parameters
	----------
	n_clusters : int or None, default=None
		Number of CoreClusters to be fitted to the data.
		When `n_clusters` is None, determine best fit of CoreClusters.
	init_cores : int, default=10
		Number of Cores (Gaussian components) to fit the data.
		The initial number is the number of Cores at initialization.
		Subsequently, it tracks the actual number of Cores.
	stabilize : float, default=0.5
		Stabilization factor forwarded to the underlying SGMM.
	n_init : int, default=10
		Number of time the DSIGM algorithm will be run with different
		Core seeds. The final results will be the best output of
		n_init consecutive runs in terms of inertia.
	max_iter : int, default=200
		Maximum number of iterations of the DSIGM algorithm for a
		single run.
	ds : bool, default=True
		DESIGM algorithm groups Cores in a density sensitive manner,
		akin to the OPTICS algorithm.
	tol : float, default=1e-4
		Relative tolerance with regards to the difference in inertia
		of two consecutive iterations to declare convergence.
	random_state : None or int or RandomState, default=None
		Determines random number generation for Core initialization. Use
		an int to make the randomness deterministic.
	Attributes
	----------
	inertia : float
		Average of maximal probabilities of each sample to each Core.
	cores : array-like, shape (n_cores,)
		A list of Cores.
	clusters : CoreCluster or None
		A graph of CoreClusters.
	"""
	def __init__(self, n_clusters=None, init_cores=10, stabilize=0.5, n_init=10,
				max_iter=200, ds=True, tol=1e-4, random_state=None):
		# Bug fix: the original referenced an undefined name `n_cores`
		# here (the parameter is `init_cores`), raising NameError on
		# every construction.
		# NOTE(review): `n_clusters` and `ds` are currently not stored or
		# forwarded — confirm whether SGMM or fit() should consume them.
		self.sgmm = SGMM(init_cores=init_cores, stabilize=stabilize,
						n_init=n_init, max_iter=max_iter,
						tol=tol, random_state=random_state)
	def fit(self, data, weights=None):
		"""
		Fit the model to `data`.
		Bug fix: `self` was missing from the signature, so any call on an
		instance raised TypeError. Not yet implemented.
		"""
		pass
	def predict(self, data, weights=None):
		"""
		Predict the clusters `data` belongs to.
		Bug fix: `self` was missing from the signature, so any call on an
		instance raised TypeError. Not yet implemented.
		"""
		pass
|
"""
This file is part of GSMWS.
"""
import gsm
import collections
import threading
import logging
import time
import datetime
import Queue
import sqlite3
import zmq
from sets import Set
class MeasurementReportList(object):
    """Thread-safe bounded FIFO buffer of measurement reports.

    A single lock guards all access; the backing deque silently evicts
    the oldest entry once `maxlen` reports are buffered.
    """
    def __init__(self, maxlen=10000):
        self.lock = threading.Lock()
        self.maxlen = maxlen
        self.reports = collections.deque(maxlen=maxlen)

    def put(self, report):
        """Append `report`, evicting the oldest entry when full."""
        with self.lock:
            self.reports.append(report)

    def get(self):
        """Remove and return the oldest buffered report.

        Bug fix: the original popped the element but never returned it,
        so every caller received None. Raises IndexError when empty
        (deque.popleft semantics).
        """
        with self.lock:
            return self.reports.popleft()

    def getall(self):
        """Atomically drain the buffer and return its contents as a list."""
        with self.lock:
            reports, self.reports = self.reports, collections.deque(maxlen=self.maxlen)
        return list(reports)
class EventDecoder(threading.Thread):
    """
    The EventDecoder listens for PhysicalStatus API events from OpenBTS and
    stores them in an in-memory MeasurementReportList. Unlike GSMDecoder, the
    EventDecoder does no further processing on them -- they are passed along
    as-is for interpretation later. We don't even decode the JSON, as these are
    intended to be pulled via an API from a BTS, so why bother?
    """
    def __init__(self, host="tcp://localhost:45160", maxlen=1000, loglvl=logging.INFO):
        # host: ZMQ endpoint of the OpenBTS event publisher.
        # maxlen: cap on buffered raw messages before oldest are evicted.
        threading.Thread.__init__(self)
        logging.basicConfig(format='%(asctime)s %(module)s %(funcName)s %(lineno)d %(levelname)s %(message)s',
            filename='/var/log/gsmws.log',level=loglvl)
        # Connect to OpenBTS event stream
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.SUB)
        self.socket.connect(host)
        # Empty subscription filter: receive every published message.
        self.socket.setsockopt(zmq.SUBSCRIBE, "")
        self.reports = MeasurementReportList(maxlen)
    def run(self):
        """
        Main processing loop. Run forever!
        """
        # Blocking recv; raw (undecoded) messages are buffered for later pulls.
        while True:
            msg = self.socket.recv()
            self.reports.put(msg)
class GSMDecoder(threading.Thread):
    """
    DEPRECATED
    This is responsible for managing the packet stream from tshark, processing
    reports, and storing the data.
    """
    def __init__(self, stream, db_lock, gsmwsdb_location, nct, maxlen=100, loglvl=logging.INFO, decoder_id=0):
        # stream: iterable of tshark output lines.
        # db_lock / gsmwsdb_location: shared lock and path for the sqlite DB
        # (the connection itself is opened in run(), on the decoder thread).
        # nct: neighbor cycle time in seconds.
        # decoder_id: distinguishes concurrent decoders in log output.
        threading.Thread.__init__(self)
        self.stream = stream
        self.current_message = ""
        self.current_arfcn = None
        self.num_of_cells = None
        self.last_arfcns = []
        self.ncc_permitted = None
        self.ignore_reports = False # ignore measurement reports
        self.msgs_seen = 0
        self.runtime = {}
        self.runtime["initial_time"] = None
        self.runtime["arfcns"] = []
        self.runtime["rssis"] = []
        self.runtime["timestamp"] = []
        self.runtime["arfcn_tracking"] = [False, False, False, False, False]
        self.NEIGHBOR_CYCLE_TIME = nct
        self.gsmwsdb_lock = db_lock
        self.gsmwsdb_location = gsmwsdb_location
        self.gsmwsdb = None # this gets created in run()
        self.decoder_id = decoder_id
        # Pending RSSI SQL statements, flushed to the DB by __write_rssi().
        self.rssi_queue = Queue.Queue()
        self.reports = MeasurementReportList()
        self.strengths_maxlen = maxlen
        self.max_strengths = {} # max strength ever seen for a given arfcn
        self.recent_strengths = {} # last 100 measurement reports for each arfcn
        logging.basicConfig(format='%(asctime)s %(module)s %(funcName)s %(lineno)d %(levelname)s %(message)s', filename='/var/log/gsmws.log',level=loglvl)
        logging.warn("GSMDecoder is deprecated! Use at your own risk.")
    def _populate_strengths(self):
        """
        Rather than storing our history, we can just store the current mean for
        each ARFCN, plus the number of recent readings we have. On start, we
        just add N instances of each ARFCN's mean to the list. This has the
        downside of being not general (only works with means) and losing
        history potentially (i.e., we die twice in a row: we'll repopulate with
        just the mean value from before).
        """
        # populate the above from stable
        with self.gsmwsdb_lock:
            max_strengths = self.gsmwsdb.execute("SELECT ARFCN, RSSI FROM MAX_STRENGTHS").fetchall()
            for item in max_strengths:
                self.max_strengths[item[0]] = item[1]
            recent = self.gsmwsdb.execute("SELECT ARFCN, RSSI, COUNT FROM AVG_STRENGTHS").fetchall()
            for item in recent:
                self.recent_strengths[item[0]] = collections.deque([item[1] for _ in range(0,item[2])],maxlen=self.strengths_maxlen)
    def __write_rssi(self):
        # Flush all queued RSSI statements in one locked transaction.
        # NOTE(review): Queue.Empty is only raised by get_nowait(); a plain
        # get() blocks, so the except clause looks like a guard against a
        # race between empty() and get() — confirm intent.
        if not self.rssi_queue.empty():
            with self.gsmwsdb_lock:
                while not self.rssi_queue.empty():
                    try:
                        query = self.rssi_queue.get()
                        self.gsmwsdb.execute(query[0], query[1])
                    except Queue.Empty:
                        break
                self.gsmwsdb.commit()
    def rssi(self):
        # returns a dict with a weighted average of each arfcn
        # we base this only on last known data for an ARFCN -- lack of report
        # doesn't mean anything, but if an arfcn is in the neighbor list and we
        # don't get a report for it, we count that as -1.
        # NOTE(review): if recent_strengths[arfcn] is ever empty (COUNT=0 row
        # restored by _populate_strengths), recent_avg divides by zero —
        # confirm that cannot happen.
        res = {}
        now = datetime.datetime.now()
        for arfcn in self.max_strengths:
            # Weighted mean: the historical max counts as one extra sample.
            tot = self.max_strengths[arfcn] + sum(self.recent_strengths[arfcn])
            res[arfcn] = float(tot) / (1 + len(self.recent_strengths[arfcn]))
            # now, update the db
            recent_avg = sum(self.recent_strengths[arfcn]) / float(len(self.recent_strengths[arfcn]))
            self.rssi_queue.put(("DELETE FROM AVG_STRENGTHS WHERE ARFCN=?", (arfcn,)))
            self.rssi_queue.put(("INSERT INTO AVG_STRENGTHS VALUES (?, ?, ?, ?)", (now, arfcn, recent_avg, len(self.recent_strengths[arfcn]))))
        return res
    def run(self):
        logging.info("In Decoder run")
        # sqlite connections are used on the thread that creates them, so the
        # connection is opened here rather than in __init__.
        self.gsmwsdb = sqlite3.connect(self.gsmwsdb_location)
        self._populate_strengths()
        last_rssi_update = datetime.datetime.now()
        # Main processing loop. We read output from tshark line by line
        # breaking every time we find a line that is unindented. Unindented
        # line = new message. The message is then handed off to process(),
        # which extracts relevant information from it.
        for line in self.stream:
            self.__write_rssi()
            if line.startswith(" "):
                #print "appending"
                self.current_message += "%s" % line
            else:
                self.process(self.current_message)
                self.current_message = line
    def update_strength(self, strengths):
        # Convenience wrapper: refresh both the max and the recent tables.
        self.update_max_strength(strengths)
        self.update_recent_strengths(strengths)
    def update_max_strength(self, strengths):
        # strengths: mapping of ARFCN -> latest RSSI reading.
        # Records new per-ARFCN maxima and drops ARFCNs absent from this
        # report, mirroring the change into MAX_STRENGTHS.
        with self.gsmwsdb_lock:
            for arfcn in strengths:
                value = strengths[arfcn]
                now = datetime.datetime.now()
                # FIXME potential leak here: we could record max values twice if we're
                # not in sync w/ db, but that should only happen rarely
                if arfcn not in self.max_strengths:
                    self.max_strengths[arfcn] = value
                    self.gsmwsdb.execute("INSERT INTO MAX_STRENGTHS VALUES(?,?,?)", (now, arfcn, value))
                elif value > self.max_strengths[arfcn]:
                    self.max_strengths[arfcn] = value
                    self.gsmwsdb.execute("UPDATE MAX_STRENGTHS SET TIMESTAMP=?, RSSI=? WHERE ARFCN=?", (now, value, arfcn))
            to_delete = []
            for arfcn in self.max_strengths:
                if arfcn not in strengths:
                    to_delete.append(arfcn)
                    self.gsmwsdb.execute("DELETE FROM MAX_STRENGTHS WHERE ARFCN=?", (arfcn,))
            for arfcn in to_delete:
                del self.max_strengths[arfcn]
            self.gsmwsdb.commit()
    def update_recent_strengths(self, strengths):
        # Append each reading to its ARFCN's bounded history, drop ARFCNs
        # absent from this report, then push the new averages to the DB.
        for arfcn in strengths:
            value = strengths[arfcn]
            if arfcn in self.recent_strengths:
                self.recent_strengths[arfcn].append(value)
            else:
                self.recent_strengths[arfcn] = collections.deque([value],maxlen=self.strengths_maxlen)
        with self.gsmwsdb_lock:
            to_delete = []
            for arfcn in self.recent_strengths:
                if arfcn not in strengths:
                    to_delete.append(arfcn)
                    self.gsmwsdb.execute("DELETE FROM AVG_STRENGTHS WHERE ARFCN=?", (arfcn,))
            for arfcn in to_delete:
                del self.recent_strengths[arfcn]
        # force a write whenever we update strength
        self.rssi()
        self.__write_rssi()
    def process(self, message):
        # Dispatch one complete tshark message on its header line:
        # measurement reports feed the strength tables, SI2 updates the
        # neighbor ARFCN list, GSMTAP headers track the current ARFCN.
        logging.info("In Decoder process")
        self.msgs_seen += 1
        if message.startswith("GSM A-I/F DTAP - Measurement Report"):
            logging.info("In Decoder Measurement Report")
            if self.ignore_reports or self.current_arfcn is None or len(self.last_arfcns) == 0:
                return # skip for now, we don't have enough data to work with
            report = gsm.MeasurementReport(self.last_arfcns, self.current_arfcn, message)
            if report.valid:
                logging.info("(decoder %d) MeasurementReport: " % (self.decoder_id) + str(report))
                self.reports.put(report.current_strengths)
                # removed the for loop from here
                self.update_max_strength(report.current_strengths)
                self.update_recent_strengths(report.current_strengths)
                for arfcn in report.current_bsics:
                    if report.current_bsics[arfcn] != None:
                        logging.debug("ZOUNDS! AN ENEMY BSIC: %d (ARFCN %d, decoder %d)" % (report.current_bsics[arfcn], arfcn, self.decoder_id))
            #gsmtap = gsm.GSMTAP(message)
            #neighbor_details = report.neighbor_details
            #if self.runtime["initial_time"] == None:
            #    self.runtime["initial_time"] = datetime.datetime.now()
            #timestamp = datetime.datetime.now()
            #indexes = []
            #if len(neighbor_details["arfcns"]) > 0:
            #    for arfcn in neighbor_details["arfcns"]:
                    #logging.info("(decoder %d) MeasureMent Report: Neighbor ARFCN=%s" % (self.decoder_id, arfcn))
                    #neighbor_details["arfcns"][arfcn]
            #        if str(arfcn) != 0:
            #            if arfcn not in self.runtime["arfcns"]:
            #                self.runtime["arfcns"].append(arfcn)#neighbor_details["arfcns"][arfcn])
                            #self.runtime["rssis"].append(neighbor_details["rssis"][arfcn])
            #                self.runtime["arfcn_tracking"].insert(neighbor_details["arfcns"].index(arfcn), True)#neighbor_details["arfcns"][arfcn]), True)
            #            else:
            #                self.runtime["arfcn_tracking"].insert(neighbor_details["arfcns"].index(arfcn), True)#neighbor_details["arfcns"][arfcn]), True)
            #            indexes.append(neighbor_details["arfcns"].index(arfcn))
            #    for rssi in neighbor_details["rssis"]:
            #        if rssi not in self.runtime["rssis"]:
            #            self.runtime["rssis"].append(rssi)
            #    for _ in self.runtime["arfcn_tracking"]:
            #        if _ not in indexes:
            #            self.runtime["arfcn_tracking"].insert(self.runtime["arfcn_tracking"].index(_), False)
            #checked_time = timestamp - self.runtime["initial_time"]
            #if checked_time.seconds > self.NEIGHBOR_CYCLE_TIME:
            #    if len(self.runtime["arfcns"]) > 0:
            #        unique_list_of_arfcns = list(set(self.runtime["arfcns"]))
            #        with self.gsmwsdb_lock:
            #            for tracker in self.runtime["arfcn_tracking"]:
            #                if tracker is False:
            #                    self.gsmwsdb.execute("INSERT INTO AVAIL_ARFCN VALUES(?,?,?)",
            #                        (tracker, timestamp, self.runtime["rssis"][self.runtime["arfcn_tracking"].index(tracker)]))
        elif message.startswith("GSM CCCH - System Information Type 2"):
            sysinfo2 = gsm.SystemInformationTwo(message)
            self.last_arfcns = sysinfo2.arfcns
            self.ncc_permitted = sysinfo2.ncc_permitted
            logging.debug("(decoder %d) SystemInformation2: %s" % (self.decoder_id, str(sysinfo2.arfcns)))
        elif message.startswith("GSM TAP Header"):
            gsmtap = gsm.GSMTAP(message)
            self.current_arfcn = gsmtap.arfcn
            logging.debug("(decoder %d) GSMTAP: Current ARFCN=%s" % (self.decoder_id, str(gsmtap.arfcn)))
|
import struct
import codecs
from protogen.stalk_proto import models_pb2 as models
from typing import List, Union, Optional
# A color is a list of 0.0-1.0 float channels (RGB, optionally + alpha).
ColorType = List[float]
def color(*channels: Union[int, float], alpha: Optional[float] = None) -> ColorType:
    """Normalize RGB channel values to 0.0-1.0 floats.

    Integer channels are treated as 0-255 and divided by 255; float
    channels pass through untouched. ``alpha`` (already a 0.0-1.0 float)
    is appended last when given.
    """
    normalized: List[float] = [
        value / 255.0 if isinstance(value, int) else value
        for value in channels
    ]
    if alpha is not None:
        normalized.append(alpha)
    return normalized
def hex2rgb(hex_str: str) -> ColorType:
    """Convert a ``#RRGGBB`` (or ``RRGGBB``) hex string to normalized floats."""
    rgb_bytes = bytes.fromhex(hex_str.lstrip("#"))
    return color(*rgb_bytes)
# COLOR CONSTANTS
BACKGROUND_COLOR = color(255, 255, 255)
# Fourth positional channel here is already a float, so it is kept as-is
# by color() and acts as the alpha component.
PRICE_LABEL_COLOR = color(100, 200, 100, 0.75)
PRICE_GRID_COLOR = color(74, 109, 77)
PRICE_GRID_ALPHA = 0.5
DAY_GRID_COLOR = color(255, 255, 255)
DAY_GRID_ALPHA = 0.02
CURRENT_PRICE_COLOR = color(206, 165, 88)
DAY_LABEL_COLOR = CURRENT_PRICE_COLOR
WHITE_LABEL_COLOR = color(255, 255, 255, alpha=0.75)
# One distinguishing color per turnip price pattern.
BIG_SPIKE_COLOR = color(95, 160, 95)
SMALL_SPIKE_COLOR = color(35, 120, 160)
DECREASING_COLOR = color(160, 95, 95)
FLUCTUATING_COLOR = color(200, 100, 200)
# INDEX OF COLORS ASSOCIATED WITH A SPECIFIC PRICE PATTERN
PATTERN_COLORS = {
    models.PricePatterns.BIGSPIKE: BIG_SPIKE_COLOR,
    models.PricePatterns.SMALLSPIKE: SMALL_SPIKE_COLOR,
    models.PricePatterns.DECREASING: DECREASING_COLOR,
    models.PricePatterns.FLUCTUATING: FLUCTUATING_COLOR,
}
|
import os
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
import itertools
class Model:
    """TF1-style policy-network skeleton: placeholders, a (placeholder)
    loss, an optimizer step, summaries, and checkpoint save/load helpers."""
    def __init__(self, logger, optimizer, learning_rate, checkpoint_dir):
        self.logger = logger
        self.init_global_step()
        # Bug fix: the placeholders must exist before build_model(), which
        # reads self.x; the original called build_model() first and raised
        # AttributeError on construction.
        self.x = tf.placeholder(shape=[None], name="x", dtype=tf.float32)
        self.y = tf.placeholder(shape=[None], name="y", dtype=tf.int32)
        self.build_model()
        self.loss = self.get_loss()
        self.optimizer = self.get_optimizer(optimizer, learning_rate)
        self.training_scalar = tf.summary.scalar("training_loss", self.loss)
        self.validation_scalar = tf.summary.scalar("validation_loss", self.loss)
        self.histogram_merged = tf.summary.merge_all()
        self.checkpoint_dir = checkpoint_dir
        self.saver = tf.train.Saver(var_list=tf.global_variables())
    def save(self, sess):
        """Write a checkpoint tagged with the current global step."""
        if not os.path.exists(self.checkpoint_dir):
            os.makedirs(self.checkpoint_dir)
        self.saver.save(sess, self.checkpoint_dir + '/model', global_step=self.global_step_tensor)
        self.logger.info("Model saved")
    def load(self, session):
        """Restore the latest checkpoint; return True on success."""
        latest_checkpoint = tf.train.latest_checkpoint(self.checkpoint_dir)
        if latest_checkpoint:
            self.logger.info("Loading model checkpoint {} ...\n".format(latest_checkpoint))
            self.saver.restore(session, latest_checkpoint)
            return True
        else:
            self.logger.info("Checkpoint not found")
            return False
    def init_global_step(self):
        # Non-trainable step counter advanced by the optimizer's minimize().
        with tf.variable_scope('global_step'):
            self.global_step_tensor = tf.Variable(0, trainable=False, name='global_step')
    def build_mlp(self, input_placeholder, output_size, scope, num_layers=2, layer_size=64,
                  activation=tf.tanh, output_activation=None):
        """Stack `num_layers` dense layers plus a final projection to
        `output_size` units inside variable scope `scope`."""
        with tf.variable_scope(scope):
            dense = input_placeholder
            for _ in range(num_layers):
                dense = tf.layers.dense(inputs=dense, units=layer_size, activation=activation)
            return tf.layers.dense(inputs=dense, units=output_size, activation=output_activation)
    def build_model(self):
        """Build the policy network; stores the sampled-action and
        log-probability ops as attributes (and also returns them)."""
        # TODO: output_size
        logits_na = self.build_mlp(input_placeholder=self.x, output_size=1,
                                   scope="discrete_policy_network",
                                   activation=tf.nn.relu)
        sampled_ac = tf.squeeze(tf.multinomial(logits_na, 1), axis=[1])
        logprob_n = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.y, logits=logits_na)
        # Keep the ops reachable: the original discarded this return value
        # in __init__, leaving predict() nothing to run.
        self.logprob_n = logprob_n
        self.sampled_ac = sampled_ac
        return logprob_n, sampled_ac
    def get_optimizer(self, optimizer, learning_rate):
        """Return a minimize op for the named optimizer (default: SGD)."""
        self.logger.info("Using %s optimizer" % optimizer)
        if optimizer == "adam":
            return tf.train.AdamOptimizer(learning_rate).minimize(self.loss,
                                                                  global_step=self.global_step_tensor)
        elif optimizer == "adagrad":
            return tf.train.AdagradOptimizer(learning_rate).minimize(self.loss,
                                                                     global_step=self.global_step_tensor)
        elif optimizer == "rmsprop":
            return tf.train.RMSPropOptimizer(learning_rate).minimize(self.loss,
                                                                     global_step=self.global_step_tensor)
        else:
            return tf.train.GradientDescentOptimizer(learning_rate).minimize(self.loss,
                                                                             global_step=self.global_step_tensor)
    def get_loss(self):
        # TODO: calculate loss (this constant placeholder keeps the graph valid)
        loss = tf.reduce_mean(tf.multiply(1, 1))
        return loss
    def validate(self, sess, batch_x, batch_y):
        """Evaluate loss and the validation summary on one batch."""
        return sess.run([self.loss, self.validation_scalar],
                        feed_dict={self.x: batch_x,
                                   self.y: batch_y})
    def predict(self, sess, batch_x):
        """Sample actions for `batch_x`.

        Bug fix: the original ran self.y — an unfed placeholder — which
        fails at runtime; the sampled-action op is the intended output.
        """
        return sess.run(self.sampled_ac,
                        feed_dict={self.x: batch_x})
    def update(self, sess, batch_x, batch_y, advantages, keep_prob):
        """Run one optimization step on a batch.

        `advantages` and `keep_prob` are currently unused; they are kept
        for call-site compatibility. Bug fix: the original unpacked five
        values from a four-element fetch list, raising ValueError on
        every call.
        """
        loss, training_scalar, histogram_merged, _ = sess.run(
            [self.loss, self.training_scalar, self.histogram_merged, self.optimizer],
            feed_dict={self.x: batch_x,
                       self.y: batch_y})
        return loss, training_scalar, histogram_merged
    def test_run(self, sess, env, max_steps):
        """Roll out the current policy in `env` for at most `max_steps`
        steps; return observations, actions, and total reward."""
        obvs = []
        actions = []
        reward = 0.
        obv = env.reset()
        for steps in itertools.count():
            obvs.append(obv)
            actions.append(self.predict(sess, np.expand_dims(obv, axis=0))[0])
            obv, r, done, _ = env.step(actions[-1])
            reward += r
            if steps >= max_steps or done:
                break
        experience = {'observations': np.stack(obvs, axis=0),
                      'actions': np.squeeze(np.stack(actions, axis=0)),
                      'reward': reward}
        return experience
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2020 Raphaël Barrois
# This code is distributed under the two-clause BSD license.
from django.conf import settings
from django.db import models as django_models
from django.utils.translation import gettext_lazy as _
from .. import models
class TransitionLog(models.GenericTransitionLog):
    """The log for a transition.
    Attributes:
        modified_object (django.db.model.Model): the object affected by this
            transition.
        from_state (str): the name of the origin state
        to_state (str): the name of the destination state
        transition (str): The name of the transition being performed.
        timestamp (datetime): The time at which the Transition was performed.
        user (django.contrib.auth.user): the user performing the transition; the
            actual model to use here is defined in the XWORKFLOWS_USER_MODEL
            setting.
    """
    # Additional keyword arguments to store, if provided
    EXTRA_LOG_ATTRIBUTES = (
        ('user', 'user', None),  # Store the 'user' kwarg to transitions.
    )
    # Optional (blank/null) author FK; the target model is resolved from
    # XWORKFLOWS_USER_MODEL, then AUTH_USER_MODEL, falling back to auth.User.
    user = django_models.ForeignKey(
        getattr(settings, 'XWORKFLOWS_USER_MODEL', getattr(settings, 'AUTH_USER_MODEL', 'auth.User')),
        blank=True, null=True, on_delete=django_models.CASCADE, verbose_name=_("author"),
    )
|
from os import EX_CANTCREAT
from glogpy.job import gaussian_job
from glogpy.linkparser import linkparsers
import numpy as np
# Jobs of the form
# 1/xxxx/1,18;
# 2/xxxx/2; <- GEOM INIT
# 3/xxxx/1,2,3;
# 4/xxxx/1,5;
# 5/xxxx/10; <- State composition
# 8/xxxx/1;
# 11/xxx/1;
# 10/xxx/3(-3);
# 6/xxxx/1; <- Spin densiry / mulliken
# 7/xxxx/1,2,3,16; <- Initial forces
# 1/xxxx/18(3);
# 2/xxxx/2; <- GEOM FINAL
# 99/xxx/99;
class dynamics_job(gaussian_job):
    """A Gaussian dynamics job log.

    parse() extracts: initial geometry (first L202), state composition
    (last L510 run with IOp 97), Mulliken populations / spin density /
    dipole (last L601), forces (last L716), and the final geometry
    (last L202). Raises Exception with a "[DYNX] ..." message when a
    required section is missing.
    """
    def __init__(self, txt):
        super().__init__(txt)

    def _last_link(self, number, predicate=None):
        """Return the last link with `number` (optionally filtered by
        `predicate`), or None. Factors out the repeated reverse scans."""
        for link in self.link_list[::-1]:
            if link.number == number and (predicate is None or predicate(link)):
                return link
        return None

    def parse(self):
        # The first L202 in the log carries the initial geometry.
        l202_init = None
        for link in self.link_list:
            if link.number == 202:
                l202_init = link
                break
        # Idiom fix: `is None` instead of `== None` throughout.
        if l202_init is None:
            raise Exception("[DYNX] No link 202!")
        geom = linkparsers.L202(l202_init.text)['geom']

        # Last L510 with IOp(97) set -> state composition.
        l510 = self._last_link(510, lambda link: 97 in link.iops.keys())
        if l510 is None:
            raise Exception("[DYNX] No dynamics-compatible L510 found")
        res = linkparsers.L510_TD(l510.text)

        # Last L601 -> Mulliken populations, spin density and dipole.
        l601 = self._last_link(601)
        if l601 is None:
            raise Exception("[DYNX] No dynamics-compatible L601 found")
        res.update(linkparsers.L601(l601.text, spin_dens=True, dipole=True))

        # Last L716 -> forces (max/RMS force and per-atom vectors).
        l716 = self._last_link(716)
        if l716 is None:
            raise Exception("[DYNX] No dynamics-compatible L716 found")
        res.update(linkparsers.L716_hpmodes(l716.text, len(geom), False))

        # Last L202 -> final geometry; it must be a second, distinct L202.
        l202_final = self._last_link(202)
        if l202_final is None or l202_final == l202_init:
            raise Exception("[DYNX] No second L202 found!")
        res['geom_final'] = linkparsers.L202(l202_final.text)['geom']
        res['geom_init'] = geom
        return res
# {
# 'diabats': {1: (-0.22095+0.01429j), 2: (0.11627-0.00537j), 3: (0.16042+0.00451j), 4: (0.49084-0.1658j), 5: (0.24327+0.2859j), 6: (-0.05558+0.04399j), 7: (0.49918-0.09541j), 8: (-0.41637+0.25625j)},
# 'adiabats': {1: (0.73058-0.15666j), 2: (-0.02168+0.18205j), 3: (0.35406-0.05912j), 4: (-0.2881+0.02187j), 5: (-0.04753+0.18635j), 6: (-0.15433+0.09799j), 7: (-0.20731-0.05285j), 8: (-0.04674+0.27822j)},
#
# 'case': -281.1206215714,
# 'casde': -1.5000000000000002e-05,
#
# 'muliken': {1: 0.860896, 2: -0.471127, 3: -0.333442, 4: -0.328013, 5: -0.732424, 6: 0.582688, 7: 0.315301, 8: 0.315301, 9: 0.39541, 10: 0.39541},
# 'mulliken_sum': {1: 0.860896, 2: -0.471127, 3: 0.249246, 4: 0.302588, 5: 0.058397},
#
# 'spinden': {1: 0.208977, 2: 0.250161, 3: 0.452028, 4: 0.042886, 5: 0.045869, 6: 0.001223, 7: -6e-05, 8: -6e-05, 9: -0.000513, 10: -0.000513},
# 'spinden_sum': {1: 0.208977, 2: 0.250161, 3: 0.453252, 4: 0.042767, 5: 0.044843},
#
# 'dipole': [['-3.5701', '4.1027', '-0.0000'], '5.4385'],
#
# 'atommasses': {1: 12.0, 2: 15.99491, 3: 15.99491, 4: 12.0, 5: 14.00307, 6: 1.00783, 7: 1.00783, 8: 1.00783, 9: 1.00783, 10: 1.00783},
#
# 'atomnos': {1: 6, 2: 8, 3: 8, 4: 6, 5: 7, 6: 1, 7: 1, 8: 1, 9: 1, 10: 1},
# 'temperature': 298.15,
# 'pressure': 1.0,
#
# 'forces': {1: array([ 1.65471143e-01, -1.61748502e-01, 6.00000000e-08]), 2: array([-2.6836681e-02, 6.0881460e-03, -9.3000000e-08]), 3: array([-9.57545490e-02, 1.74179419e-01, 2.13000000e-07]), 4: array([ 6.999268e-03, -2.780492e-02, -2.400000e-08]), 5: array([ 1.2235064e-02, 8.0989678e-02, -5.0000000e-09]), 6: array([-4.3020526e-02, -2.9154888e-02, -1.1200000e-07]), 7: array([ 0.00334231, -0.00568359, -0.00953219]), 8: array([ 0.0033423 , -0.00568355, 0.00953219]), 9: array([-0.01288919, -0.01559086, -0.01395954]), 10: array([-0.01288915, -0.01559093, 0.01395951])},
# 'maxforce': 0.174179419,
# 'rmsforce': 0.059306177,
#
# 'geom_final': {1: [0, array([-0.090002, 0.512569, 0. ])], 2: [0, array([1.342 , 0.766244, 0. ])], 3: [0, array([-1.050758, 1.451089, 0. ])], 4: [0, array([-0.641986, -0.894045, 0. ])], 5: [0, array([ 0.39821 , -1.987024, 0. ])], 6: [0, array([-0.87376 , 2.454532, 0. ])], 7: [0, array([-1.280347, -0.952623, 0.893902])], 8: [0, array([-1.280347, -0.952623, -0.893902])], 9: [0, array([ 0.997514, -1.794707, 0.80825 ])], 10: [0, array([ 0.997514, -1.794707, -0.80825 ])]},
# 'geom_init': {1: [0, array([-0.077164, 0.554472, 0. ])], 2: [0, array([1.354838, 0.808147, 0. ])], 3: [0, array([-1.03792 , 1.492992, 0. ])], 4: [0, array([-0.629148, -0.852142, 0. ])], 5: [0, array([ 0.411048, -1.945121, 0. ])], 6: [0, array([-0.860922, 2.496435, 0. ])], 7: [0, array([-1.267509, -0.91072 , 0.893902])], 8: [0, array([-1.267509, -0.91072 , -0.893902])], 9: [0, array([ 1.010352, -1.752804, 0.80825 ])], 10: [0, array([ 1.010352, -1.752804, -0.80825 ])]}
# } |
# Resource object code (Python 3)
# Created by: object code
# Created by: The Resource Compiler for Qt version 6.0.0
# WARNING! All changes made in this file will be lost!
from PySide6 import QtCore
qt_resource_data = b"\
\x00\x00\x06{\
T\
EMPLATE = app\x0aLA\
NGUAGE = C++\x0aTAR\
GET = as\
sistant\x0a\x0aCONFIG \
+= qt war\
n_on\x0aQT \
+= xml networ\
k\x0a\x0aPROJECTNAME \
= Assistan\
t\x0aDESTDIR \
= ../../bin\
\x0a\x0aFORMS += findd\
ialog.ui \x5c\x0a \
helpdialog.ui\
\x5c\x0a mainw\
indow.ui \x5c\x0a \
settingsdialo\
g.ui \x5c\x0a t\
abbedbrowser.ui \
\x5c\x0a topicc\
hooser.ui\x0a\x0aSOURC\
ES += main.cpp \x5c\
\x0a helpwin\
dow.cpp \x5c\x0a \
topicchooser.c\
pp \x5c\x0a doc\
uparser.cpp \x5c\x0a \
settingsdi\
alog.cpp \x5c\x0a \
index.cpp \x5c\x0a \
profile.c\
pp \x5c\x0a con\
fig.cpp \x5c\x0a \
finddialog.cpp\
\x5c\x0a helpd\
ialog.cpp \x5c\x0a \
mainwindow.c\
pp \x5c\x0a tab\
bedbrowser.cpp\x0a\x0a\
HEADERS +\
= helpwindow.h \x5c\
\x0a topicch\
ooser.h \x5c\x0a \
docuparser.h \x5c\
\x0a setting\
sdialog.h \x5c\x0a \
index.h \x5c\x0a \
profile.h \
\x5c\x0a finddi\
alog.h \x5c\x0a \
helpdialog.h \x5c\x0a\
mainwind\
ow.h \x5c\x0a t\
abbedbrowser.h \x5c\
\x0a config.\
h\x0a\x0aRESOURCES += \
assistant.qrc\x0a\x0aD\
EFINES += QT_KEY\
WORDS\x0a#DEFINES +\
= QT_PALMTOPCEN\
TER_DOCS\x0a!networ\
k:DEFINES \
+= QT_INTERNAL_\
NETWORK\x0aelse:QT \
+= network\x0a!xml:\
DEFINES \
+= QT_IN\
TERNAL_XML\x0aelse:\
QT += xml\x0ainclud\
e( ../../src/qt_\
professional.pri\
)\x0a\x0awin32 {\x0a \
LIBS += -lshell3\
2\x0a RC_FILE = \
assistant.rc\x0a}\x0a\x0a\
macos {\x0a ICON\
= assistant.icn\
s\x0a TARGET = a\
ssistant\x0a# QM\
AKE_INFO_PLIST =\
Info_mac.plist\x0a\
}\x0a\x0a#target.path \
= $$[QT_INSTALL_\
BINS]\x0a#INSTALLS \
+= target\x0a\x0a#assi\
stanttranslation\
s.files = *.qm\x0a#\
assistanttransla\
tions.path = $$[\
QT_INSTALL_TRANS\
LATIONS]\x0a#INSTAL\
LS += assistantt\
ranslations\x0a\x0aTRA\
NSLATIONS \
= assistant_de.\
ts \x5c\x0a \
assistant\
_fr.ts\x0a\x0a\x0aunix:!c\
ontains(QT_CONFI\
G, zlib):LIBS +=\
-lz\x0a\x0a\x0atarget.pa\
th=$$[QT_INSTALL\
_BINS]\x0aINSTALLS \
+= target\x0a\
"
qt_resource_name = b"\
\x00\x08\
\x0e\x84\x7fC\
\x00e\
\x00x\x00a\x00m\x00p\x00l\x00e\x00s\
\x00\x07\
\x0c\xe8G\xe5\
\x00e\
\x00x\x00a\x00m\x00p\x00l\x00e\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x16\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01vG\xd7\x06\xb8\
"
def qInitResources():
    # Register the embedded resource payload (format version 0x03) with
    # Qt's resource system so ":/..." paths resolve to the data above.
    QtCore.qRegisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
    # Counterpart of qInitResources(): unregister the same payload.
    QtCore.qUnregisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)
# Auto-register on import (standard pattern for generated resource modules).
qInitResources()
|
'''
====================================================================
Copyright (c) 2016 Barry A Scott. All rights reserved.
This software is licensed as described in the file LICENSE.txt,
which you should have received as part of this distribution.
====================================================================
wb_git_log_history.py
'''
import time
import datetime
from PyQt5 import QtWidgets
from PyQt5 import QtCore
#------------------------------------------------------------
#
# WbLogHistoryOptions - option to control which commit logs to show
#
#------------------------------------------------------------
class WbLogHistoryOptions(QtWidgets.QDialog):
    """Modal dialog selecting which commit logs to show: an optional
    commit-count limit and optional since/until date bounds, seeded from
    (and independent of) the application's log_history preferences."""
    def __init__( self, app, parent ):
        self.app = app
        prefs = self.app.prefs.log_history
        super().__init__( parent )
        self.setWindowTitle( T_('Commit Log History Options - %s') % (' '.join( app.app_name_parts ),) )
        # Checkboxes enable/disable their companion editor widgets.
        self.use_limit = QtWidgets.QCheckBox( T_('Show only') )
        self.use_until = QtWidgets.QCheckBox( T_('Show Until') )
        self.use_since = QtWidgets.QCheckBox( T_('Show Since') )
        self.limit = QtWidgets.QSpinBox()
        self.limit.setRange( 1, 1000000 )
        self.limit.setSuffix( T_(' Commits') )
        today = QtCore.QDate.currentDate()
        the_past = QtCore.QDate( 1990, 1, 1 )
        # NOTE(review): setDateRange(min, max) is called with (today,
        # the_past) — the arguments look swapped; confirm against Qt docs.
        self.until = QtWidgets.QCalendarWidget()
        self.until.setDateRange( today, the_past )
        self.until.setHorizontalHeaderFormat( self.until.SingleLetterDayNames )
        self.until.setGridVisible( True )
        self.until.setDateEditEnabled( True )
        self.until.setVerticalHeaderFormat( self.until.NoVerticalHeader )
        self.since = QtWidgets.QCalendarWidget()
        self.since.setDateRange( today, the_past )
        self.since.setHorizontalHeaderFormat( self.since.SingleLetterDayNames )
        self.since.setGridVisible( True )
        self.since.setDateEditEnabled( True )
        self.since.setVerticalHeaderFormat( self.since.NoVerticalHeader )
        self.buttons = QtWidgets.QDialogButtonBox()
        self.buttons.addButton( self.buttons.Ok )
        self.buttons.addButton( self.buttons.Cancel )
        self.buttons.accepted.connect( self.accept )
        self.buttons.rejected.connect( self.reject )
        layout = QtWidgets.QGridLayout()
        row = 0
        layout.addWidget( self.use_limit, row, 0 )
        layout.addWidget( self.limit, row, 1, 1, 3 )
        row += 1
        layout.addWidget( self.use_since, row, 0 )
        layout.addWidget( self.since, row, 1 )
        layout.addWidget( self.use_until, row, 2 )
        layout.addWidget( self.until, row, 3 )
        row += 1
        layout.addWidget( self.buttons, row, 0, 1, 4 )
        self.setLayout( layout )
        # --- limit
        self.use_limit.setChecked( prefs.use_default_limit )
        self.limit.setValue( prefs.default_limit )
        self.limit.setEnabled( prefs.use_default_limit )
        # --- until
        self.use_until.setChecked( prefs.use_default_until_days_interval )
        until = QtCore.QDate.currentDate()
        until = until.addDays( -prefs.default_until_days_interval )
        self.until.setSelectedDate( until )
        self.until.setEnabled( prefs.use_default_until_days_interval )
        # --- since
        self.use_since.setChecked( prefs.use_default_since_days_interval )
        since = QtCore.QDate.currentDate()
        # Bug fix: the original subtracted use_default_since_days_interval
        # (the boolean enable flag) instead of the day-count preference
        # default_since_days_interval, mirroring the "until" branch above.
        since = since.addDays( -prefs.default_since_days_interval )
        self.since.setSelectedDate( since )
        self.since.setEnabled( prefs.use_default_since_days_interval )
        # --- connect up behavior
        self.use_limit.stateChanged.connect( self.limit.setEnabled )
        self.use_until.stateChanged.connect( self.until.setEnabled )
        self.use_since.stateChanged.connect( self.since.setEnabled )
        self.since.selectionChanged.connect( self.__sinceChanged )
        self.until.selectionChanged.connect( self.__untilChanged )
    def __sinceChanged( self ):
        # since must be less then until: push until forward if needed
        since = self.since.selectedDate()
        until = self.until.selectedDate()
        if since >= until:
            until = since.addDays( 1 )
            self.until.setSelectedDate( until )
    def __untilChanged( self ):
        # since must be less then until: pull since backward if needed
        since = self.since.selectedDate()
        until = self.until.selectedDate()
        if since >= until:
            since = until.addDays( -1 )
            self.since.setSelectedDate( since )
    def getLimit( self ):
        """Return the commit-count limit, or None when unlimited."""
        if self.use_limit.isChecked():
            return self.limit.value()
        else:
            return None
    def getUntil( self ):
        """Return the 'until' bound as a Unix timestamp, or None."""
        if self.use_until.isChecked():
            qt_until = self.until.selectedDate()
            until = datetime.date( qt_until.year(), qt_until.month(), qt_until.day() )
            return time.mktime( until.timetuple() )
        else:
            return None
    def getSince( self ):
        """Return the 'since' bound as a Unix timestamp, or None."""
        if self.use_since.isChecked():
            qt_since = self.since.selectedDate()
            since = datetime.date( qt_since.year(), qt_since.month(), qt_since.day() )
            return time.mktime( since.timetuple() )
        else:
            return None
|
"""Module to create database from downloaded comments in JSON format.
"""
import json
import sqlite3
from datetime import datetime
from chatbot.config import *
# Rows before START_ROW are skipped (crude resume support); unpaired rows
# are purged every CLEANUP_ROW rows; progress is logged every
# PRINT_STATUS_ROW rows.
START_ROW = 0
CLEANUP_ROW = 1000000
PRINT_STATUS_ROW = 100000
def create_table(cursor):
    """Drop any existing parent_reply table and create a fresh, empty one.

    One row per parent/reply pair; comment_id is the reply, parent_id its
    parent, and parent holds the parent's body once it is known.
    """
    cursor.execute("DROP TABLE IF EXISTS parent_reply;")
    cursor.execute("""
    CREATE TABLE IF NOT EXISTS parent_reply(
        parent_id TEXT PRIMARY KEY, comment_id TEXT UNIQUE, parent TEXT,
        comment TEXT, subreddit TEXT, unix INT, score INT);
    """)
def format_data(data):
    """Normalise a raw comment body for storage: newlines and carriage
    returns become a sentinel token, and double quotes become single
    quotes so the text can sit inside double-quoted SQL literals."""
    token = " newlinechar "
    return data.replace("\n", token).replace("\r", token).replace('"', "'")
def fetch_one_row(cursor, sql):
    """Execute *sql* and return the first column of the first row, or None.

    Best-effort helper: any database error is printed and swallowed,
    returning None.
    """
    try:
        cursor.execute(sql)
        row = cursor.fetchone()
    except Exception as e:
        print("fetch_one_row ", e)
        return None
    return row[0] if row is not None else None
def find_parent(cursor, parent_id):
    """Return the body of the comment whose comment_id equals *parent_id*,
    or None when unknown.

    Uses a parameterized query: the previous f-string interpolation broke
    (and could inject into) the SQL whenever parent_id contained a quote.
    Errors are printed and swallowed, returning None (best effort).
    """
    try:
        cursor.execute(
            "SELECT comment FROM parent_reply WHERE comment_id = ? LIMIT 1;",
            (parent_id,))
        row = cursor.fetchone()
        return row[0] if row is not None else None
    except Exception as e:
        print("find_parent ", e)
        return None
def find_existing_score(cursor, parent_id):
    """Return the score of the reply already stored for *parent_id*,
    or None when no reply exists yet.

    Uses a parameterized query: the previous f-string interpolation broke
    (and could inject into) the SQL whenever parent_id contained a quote.
    Errors are printed and swallowed, returning None (best effort).
    """
    try:
        cursor.execute(
            "SELECT score FROM parent_reply WHERE parent_id = ? LIMIT 1;",
            (parent_id,))
        row = cursor.fetchone()
        return row[0] if row is not None else None
    except Exception as e:
        print("find_existing_score ", e)
        return None
def is_acceptable(data):
    """Filter for comment bodies worth storing.

    Rejects bodies with more than 50 space-separated words, empty bodies,
    bodies over 1000 characters, and the '[deleted]'/'[removed]'
    placeholders.
    """
    if len(data.split(' ')) > 50 or not data:
        return False
    if len(data) > 1000:
        return False
    return data not in ('[deleted]', '[removed]')
def transaction_builder(cursor, connection, sql, sql_transactions):
    """Queue *sql*; once more than 1000 statements are queued, run them
    all inside one transaction and commit.

    Individual statement failures are skipped on purpose (best effort).
    Returns the (possibly emptied) queue; the caller must keep the
    returned list, as the flushed queue is replaced, not mutated.
    """
    sql_transactions.append(sql)
    if len(sql_transactions) <= 1000:
        return sql_transactions
    cursor.execute('BEGIN TRANSACTION')
    for statement in sql_transactions:
        try:
            cursor.execute(statement)
        except Exception:
            pass
    connection.commit()
    return []
def sql_insert_replace_comment(comment_id, parent_id, parent_body, comment_body, subreddit, created_utc, score):
    """Build an UPDATE statement replacing the reply stored for parent_id.

    The parent column is only rewritten when parent_body is known.
    NOTE(review): values are interpolated straight into the SQL text; this
    relies on format_data() having stripped double quotes from the bodies.
    """
    if parent_body is None:
        parent_body_sql = ""
    else:
        parent_body_sql = f"""parent = "{parent_body}", """
    template = """
    UPDATE parent_reply
    SET parent_id = "{}", comment_id = "{}", {}comment = "{}", subreddit = "{}", unix = {}, score = {}
    WHERE parent_id = "{}";
    """
    return template.format(parent_id, comment_id, parent_body_sql, comment_body, subreddit, int(created_utc), score, parent_id)
def sql_insert_has_parent(comment_id, parent_id, parent_body, comment_body, subreddit, created_utc, score):
    """Build an INSERT for a reply whose parent body is already known.

    NOTE(review): values are interpolated straight into the SQL text; this
    relies on format_data() having stripped double quotes from the bodies.
    """
    template = """
    INSERT INTO parent_reply (parent_id, comment_id, parent, comment, subreddit, unix, score)
    VALUES ("{}","{}","{}","{}","{}",{},{});
    """
    return template.format(parent_id, comment_id, parent_body, comment_body, subreddit, int(created_utc), score)
def sql_insert_no_parent(comment_id, parent_id, comment_body, subreddit, created_utc, score):
    """Build an INSERT for a reply whose parent body is not (yet) known.

    NOTE(review): values are interpolated straight into the SQL text; this
    relies on format_data() having stripped double quotes from the bodies.
    """
    template = """
    INSERT INTO parent_reply (parent_id, comment_id, comment, subreddit, unix, score)
    VALUES ("{}","{}","{}","{}",{},{});
    """
    return template.format(parent_id, comment_id, comment_body, subreddit, int(created_utc), score)
def clean_up(cursor, connection):
    """Delete rows that never received a parent body, then VACUUM to
    reclaim the freed space."""
    print("Cleaning up!")
    cursor.execute("DELETE FROM parent_reply WHERE parent IS NULL")
    connection.commit()
    cursor.execute("VACUUM")
    connection.commit()
def construct_sql(cursor, row, paired_rows):
    """Turn one raw JSON comment line into an INSERT/UPDATE statement.

    Returns (sql, paired_rows): sql is None when the comment is filtered
    out or loses to an existing higher-scored reply; paired_rows is
    incremented only when a brand-new parent/reply pair is created.
    """
    data = json.loads(row)
    comment_id = data['name']
    parent_id = data['parent_id']
    comment_body = format_data(data['body'])
    created_utc = data['created_utc']
    score = data['score']
    subreddit = data['subreddit']
    parent_body = find_parent(cursor, parent_id)
    sql = None
    # filter some "useless" comments
    if not is_acceptable(comment_body):
        return sql, paired_rows
    existing_score = find_existing_score(cursor, parent_id)
    if existing_score is not None:
        # A reply for this parent already exists; keep only the better one.
        if score > existing_score:
            sql = sql_insert_replace_comment(
                comment_id, parent_id, parent_body, comment_body, subreddit, created_utc, score)
    elif parent_body is not None:
        if score >= 2:
            sql = sql_insert_has_parent(
                comment_id, parent_id, parent_body, comment_body, subreddit, created_utc, score)
            paired_rows += 1
    else:
        sql = sql_insert_no_parent(comment_id, parent_id, comment_body, subreddit, created_utc, score)
    return sql, paired_rows
def main():
    """Stream the JSON comment dump line by line into the sqlite database,
    logging progress and periodically purging unpaired rows."""
    sql_transactions = []
    connection = sqlite3.connect(DB_PATH)
    cursor = connection.cursor()
    create_table(cursor)
    row_counter = 0
    paired_rows = 0
    with open(JSON_COMMENTS_PATH, buffering=1000) as dump:
        for line in dump:
            row_counter += 1
            if row_counter <= START_ROW:
                continue
            try:
                sql, paired_rows = construct_sql(cursor, line, paired_rows)
                if sql is not None:
                    sql_transactions = transaction_builder(cursor, connection, sql, sql_transactions)
            except Exception as e:
                print(str(e))
            if row_counter % PRINT_STATUS_ROW == 0:
                print(f"Total rows read: {row_counter}, Paired rows: {paired_rows}, Time: {str(datetime.now())}")
            if row_counter % CLEANUP_ROW == 0:
                clean_up(cursor, connection)
# Script entry point: build the database from the configured JSON dump.
if __name__ == '__main__':
    main()
|
import os
import glob
from catsndogs.data import training_dir, is_cat, is_dog
# Per-class training image directories.  Note: plot_samples below shadows
# both names with its own glob results, so these module-level paths are
# not used by it.
dogs = os.path.join(training_dir, "dog")
cats = os.path.join(training_dir, "cat")
def plot_samples(axes=None,
                 n=4):
    """Plot a 2 x n grid of random training images: cats on row 0, dogs on row 1.

    Args:
        axes: Optional 2 x n array of matplotlib Axes to draw into.  When
            None, a new figure with suitable axes is created.
        n: Number of image columns per class.
    """
    import matplotlib.pyplot as plt
    import numpy as np
    from skimage import io
    m = 2  # one row of cats, one row of dogs
    cats = glob.glob(os.path.join(training_dir, "cat", "*.jpg"))
    dogs = glob.glob(os.path.join(training_dir, "dog", "*.jpg"))
    if axes is None:
        f, axes = plt.subplots(m, n, figsize=(n*2, m*2))
    else:
        # BUG FIX: Axes.figure is an attribute, not a callable, and an
        # ndarray of axes has no .figure at all -- the old `axes.figure()`
        # raised on every user-supplied axes array.
        f = axes[0, 0].figure
    for i in range(n):
        ax = axes[0, i]
        img = io.imread(np.random.choice(cats))
        ax.imshow(img)
        ax.set_title("({}) A cat".format(chr(ord("a") + i)), loc="left")
        ax.set_xticks([])
        ax.set_yticks([])
    for i in range(n):
        ax = axes[1, i]
        img = io.imread(np.random.choice(dogs))
        ax.imshow(img)
        ax.set_title("({}) A dog".format(chr(ord("e") + i)), loc="left")
        ax.set_xticks([])
        ax.set_yticks([])
    plt.tight_layout()
|
#!/usr/bin/env python3
import argparse
import os
import re
import sqlite3
import sys
# Default database location: 'database.db' next to this script (sys.path[0]).
DEFAULT_DB_FILE = os.path.join(sys.path[0], 'database.db')
def print_classification_count(conn):
    """Print a LaTeX table of implemented projects grouped by classification,
    ordered by project count, descending."""
    cursor = conn.cursor()
    cursor.execute("""
        SELECT CLASSIFICATION, COUNT(*)
        FROM `GithubProjectUnfiltered`
        WHERE STATUS == "IMPLEMENTED"
        GROUP BY CLASSIFICATION
        ORDER BY COUNT(*) DESC
    """)
    # output table in latex format
    print(r"\begin{tabular}{ l c } \toprule")
    print(r" type & projects \\ \midrule")
    for btype, projects in cursor.fetchall():
        print(r" {btype} & {projects} \\".format(btype=btype, projects=projects))
    print(r"\bottomrule \end{tabular}")
def print_classification_in_detail(conn, benchdir):
    """Print a LaTeX table of implemented projects and their benchmark counts.

    For every implemented project, the generated benchmark file
    ``<owner>-<project>_test.c`` inside *benchdir* is scanned for
    ``BENCHMARK(name, variant, int, int)`` macro invocations.

    Args:
        conn: Open sqlite3 connection to the project database.
        benchdir: Directory containing the generated benchmark .c files.
    """
    c = conn.cursor()
    c.execute("""
        SELECT CLASSIFICATION, GITHUB_OWNER_NAME, GITHUB_PROJECT_NAME
        FROM `GithubProjectUnfiltered`
        WHERE STATUS == "IMPLEMENTED"
    """)
    benchmarks = {}
    for line in c.fetchall():
        btype = line[0]
        # Owner/project names may contain path separators; flatten to '-'.
        dir_name = "-".join(line[1:3]).replace('\\', '-').replace('/', '-') + "_test"
        if btype not in benchmarks:
            benchmarks[btype] = []
        with open(os.path.join(benchdir, dir_name + ".c"), 'r') as f:
            found_benchmarks = re.findall(r'BENCHMARK\s*\(\s*(\w+)\s*,\s*(\w+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)', f.read())
        benchmarks[btype].append({dir_name: len(found_benchmarks)})
    rows = []
    for btype, b in benchmarks.items():
        n_benchmarks = sum([n for v in b for n in v.values()])
        # BUG FIX: '\_' was an invalid escape sequence in a non-raw string
        # (SyntaxWarning on modern Python); use a raw string instead.
        latex_type = btype.replace('_', r'\_')
        rows.append((latex_type, len(b), n_benchmarks))
    # Sort by benchmark count, then (stable sort) by project count, descending.
    rows.sort(key=lambda r: r[2], reverse=True)
    rows.sort(key=lambda r: r[1], reverse=True)
    print(r"\begin{tabular}{ l c c } \toprule")
    print(r" type & projects & benchmarks \\ \midrule")
    for row in rows:
        print(r" {} & {} & {}\\".format(*row))
    print(r" \hline")
    print(r" sum & {} & {}\\".format(sum([r[1] for r in rows]), sum([r[2] for r in rows])))
    print(r"\bottomrule\end{tabular}")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Print some statistics about the database')
parser.add_argument('benchdir', metavar='BENCHDIR', nargs='?', type=str,
help='directory which contains all the benchmarks')
parser.add_argument('--database', metavar='FILE', type=str, nargs='?', default=DEFAULT_DB_FILE,
help='filepath to database')
args = parser.parse_args()
if args.benchdir is not None and not os.path.isdir(args.benchdir):
parser.error('"{}" is not a directory'.format(args.benchdir))
if not os.path.isfile(args.database):
parser.error('"{}" is not a file'.format(args.database))
conn = sqlite3.connect(args.database)
if args.benchdir is None:
print_classification_count(conn)
else:
print_classification_in_detail(conn, args.benchdir)
|
from .urban3d_training import *
from .urban3d_validation import *
from .urban3d_training_cgan import *
|
from typing import List, Literal, Tuple
from colorama import Fore, Style # type: ignore
from .util import create_id, format_amount
def format_name(acc: "Account", in_relation_to: "Account | None" = None) -> str:
if in_relation_to is None:
return f"{acc.name} ({acc.id})"
if (
hasattr(acc, "_owner")
and hasattr(in_relation_to, "_owner")
and acc._owner != in_relation_to._owner # type: ignore
):
return f"{acc.name} ({acc.id}) owned by {acc._owner.name} ({acc._owner.id})" # type: ignore
return f"{acc.name} ({acc.id})"
def transfer_msg(
    source: "BalanceAccount",
    dest: "BalanceAccount",
) -> Tuple[str, str]:
    """Return the (withdrawal, deposit) transaction descriptions for a
    transfer from *source* to *dest*."""
    return (
        f"Transfer to {format_name(dest, source)}",
        f"Transfer from {format_name(source, dest)}",
    )
class Transaction:
    """A single signed ledger entry: positive amounts are credits,
    negative amounts are debits."""

    def __init__(self, amount: int, description: str) -> None:
        self.amount = amount
        self.description = description

    def str(self, indent=0) -> str:
        """Render the entry, colouring the amount green/red by sign."""
        colour = Fore.GREEN if self.amount > 0 else Fore.RED
        prefix = " " * indent
        return (
            f"{prefix}{colour}{format_amount(self.amount)}{Style.RESET_ALL} -"
            f" {self.description}"
        )
class Account:
    """Base account: an immutable id and type plus a mutable, non-empty name."""

    _id: str
    _type: str
    _name: str

    def __init__(self, type: str, name: str) -> None:
        assert len(type) > 0, "Account type cannot be empty"
        assert len(name) > 0, "Account name cannot be empty"
        self._type = type
        self._name = name
        # Ids are generated with a prefix of the type's first letter.
        self._id = create_id(type[0])

    @property
    def id(self) -> str:
        """Unique, generated account id (read-only)."""
        return self._id

    @property
    def name(self) -> str:
        """Human-readable account name."""
        return self._name

    @name.setter
    def name(self, name: str) -> None:
        # Renaming keeps the non-empty invariant from __init__.
        assert len(name) > 0, "Account name cannot be empty"
        self._name = name

    @property
    def type(self):
        """Account type string, fixed at construction (read-only)."""
        return self._type
class BalanceAccount(Account):
    """An account carrying an integer balance and a transaction history."""

    _balance: int
    _transactions: List[Transaction]
    _owner: "Account"

    def __init__(self, type: str, name: str) -> None:
        super().__init__(type, name)
        self._balance = 0
        self._transactions = []

    @property
    def balance(self) -> int:
        """Current balance (smallest currency unit)."""
        return self._balance

    @property
    def balance_str(self) -> str:
        """Balance formatted for display (sign only when negative)."""
        return format_amount(self._balance, include_sign="negative")

    @property
    def transactions(self) -> List[Transaction]:
        """Chronological list of posted transactions."""
        return self._transactions

    def transactions_str(self, indent: int = 0) -> str:
        """Render the history, one line per entry; '' when empty."""
        if not self._transactions:
            return ""
        return "\n".join(entry.str(indent) for entry in self._transactions)

    def account_str(self, indent: int = 0) -> str:
        """Render a one-line summary plus the indented history, if any."""
        header = (
            f"{' ' * indent}{self.name} ({self.type}, {self.id}): {self.balance_str}"
        )
        if not self._transactions:
            return header
        return header + "\n" + self.transactions_str(indent + 4)

    def _post(self, amount: int, description: str) -> None:
        # Zero-amount movements are deliberately not recorded.
        if amount == 0:
            return
        self._transactions.append(Transaction(amount, description))

    def deposit(self, amount: int, description: str | None = None) -> None:
        """Add *amount* to the balance; raises ValueError when negative."""
        if amount < 0:
            raise ValueError("Cannot deposit negative amount")
        self._balance += amount
        self._post(amount, description or "Deposit")

    def withdraw(self, amount: int, description: str | None = None) -> None:
        """Remove *amount* from the balance; raises ValueError when
        negative or exceeding the current balance."""
        if amount < 0:
            raise ValueError("Cannot withdraw negative amount")
        if amount > self._balance:
            raise ValueError("Insufficient funds")
        self._balance -= amount
        self._post(-amount, description or "Withdrawal")

    def transfer(self, other: "BalanceAccount", amount: int) -> None:
        """Move *amount* from this account into *other*."""
        if self is other:
            raise ValueError("Cannot transfer to self")
        if amount < 0:
            raise ValueError("Cannot transfer negative amount")
        to_msg, from_msg = transfer_msg(self, other)
        try:
            self.withdraw(amount, to_msg)
        except ValueError as e:
            raise ValueError(to_msg) from e
        # deposit cannot fail because we already checked for negative amount
        other.deposit(amount, from_msg)
|
"""
SPDX-FileCopyrightText: © 2016 Ben Dudson, University of York. Email: benjamin.dudson@york.ac.uk
SPDX-License-Identifier: MIT
"""
from freeqdsk import _fileutils
from io import StringIO
def test_f2s():
    """f2s formats numbers as fixed-width, 9-decimal scientific notation."""
    cases = [
        (0.0, " 0.000000000E+00"),
        (1234, " 1.234000000E+03"),
        (-1.65281e12, "-1.652810000E+12"),
        (-1.65281e-2, "-1.652810000E-02"),
    ]
    for value, expected in cases:
        assert _fileutils.f2s(value) == expected
def test_ChunkOutput():
    """ChunkOutput packs writes into rows of five 16-character fields."""
    output = StringIO()
    co = _fileutils.ChunkOutput(output)
    values = [1.0, -3.2, 6.2e5, 8.7654e-12, 42.0, -76]
    for val in values:
        co.write(val)
    expected = """ 1.000000000E+00-3.200000000E+00 6.200000000E+05 8.765400000E-12 4.200000000E+01
-76"""
    assert output.getvalue() == expected
|
import re
import os
import csv
from time import localtime,strftime
# CSV report header, stamped with the generation timestamp at import time.
# NOTE(review): this module is Python 2 (print statements below).
FTI_FILE_HEADER= \
""";FTI Report
;Interfaces Internal/External
;Author Author
;Version Version
;Date %s \n\n""" % (strftime("%d %b %Y %H:%M:%S", localtime()))
def Process(File_Name,Symbols):
file = open(File_Name)
fields = []
entries = 0
for line in file:
current_line = line.strip()+"\n",
if (current_line[0][0]=="_"):
if (current_line[0].find(".")<0):
#print current_line[0]
csv = current_line[0].replace('\t',',').replace(' ',',').replace('\n','')
field = csv.split(',')
if field[0] in Symbols:
fields.append(field)
entries += 1
#break
print entries, " Symbols Found"
return fields
def outputCSV(outputfile,data):
    # Write the FTI header followed by one fixed-width CSV row per symbol.
    # Only 3-field rows (name, address, size) are emitted; note the output
    # row uses fixed width ("32") and mask ("FFFFFFFF") literals, and the
    # parsed size is currently unused (only referenced in a commented print).
    exportfile = open(outputfile,'w')
    count = 0
    exportfile.write(FTI_FILE_HEADER)
    for row in data:
        #print row,
        #print len(row)
        if len(row) == 3:
            size = int(row[2],16)
            #print row[0],size
            csv_entry = "%-40s, %10s, %5s, %8s \n" % (row[0],row[1]+"00000000","32","FFFFFFFF")
            #csv_entry = row[0]+",\t"+row[1]+",\t"+row[2]+"\n"
            exportfile.write(csv_entry)
            count += 1
    exportfile.close()
    print count, " Entries added"
def loadSymbols(datafile):
    """Read one symbol name per line from datafile, stripped of
    surrounding whitespace, and return them as a list.

    FIX: uses a context manager so the file handle is always closed
    (the previous version leaked it).
    """
    with open(datafile) as symbolfile:
        return [row.strip() for row in symbolfile]
if __name__ == '__main__':
    # Build <mapfile>.csv from the hard-coded map file, keeping only the
    # symbols listed in symbols.txt.
    symbols = loadSymbols("symbols.txt")
    infile = "SMC00-0000-0000.map"
    outputfile = infile+".csv"
    data = Process(infile,symbols)
    outputCSV(outputfile,data)
|
import pytest
from argo.workflows.client import V1alpha1Arguments, V1alpha1Inputs, V1Toleration
from pydantic import ValidationError
from hera.input import InputFrom
from hera.operator import Operator
from hera.resources import Resources
from hera.retry import Retry
from hera.task import Task
from hera.toleration import GPUToleration
from hera.volumes import EmptyDirVolume, ExistingVolume, Volume
# NOTE: no_op/op/... arguments throughout are pytest fixtures defined
# elsewhere (presumably in conftest.py).
# Both .next() chaining and the >> operator must record linear dependencies.
def test_next_and_shifting_set_correct_dependencies(no_op):
    t1, t2, t3 = Task('t1', no_op), Task('t2', no_op), Task('t3', no_op)
    t1.next(t2).next(t3)
    assert t2.argo_task.dependencies == ['t1']
    assert t3.argo_task.dependencies == ['t2']
    t4, t5, t6 = Task('t4', no_op), Task('t5', no_op), Task('t6', no_op)
    t4 >> t5 >> t6
    assert t5.argo_task.dependencies == ['t4']
    assert t6.argo_task.dependencies == ['t5']
# .when() must add a dependency on the conditioned task and set the
# Argo 'when' expression against that task's result.
def test_when_correct_expression_and_dependencies(no_op):
    t1, t2, t3 = Task('t1', no_op), Task('t2', no_op), Task('t3', no_op)
    t2.when(t1, Operator.equals, "t2")
    t3.when(t1, Operator.equals, "t3")
    assert t2.argo_task.dependencies == ['t1']
    assert t3.argo_task.dependencies == ['t1']
    assert t2.argo_task._when == "{{tasks.t1.outputs.result}} == t2"
    assert t3.argo_task._when == "{{tasks.t1.outputs.result}} == t3"
# A retry whose max_duration is below duration must fail validation.
def test_retry_limits_fail_validation():
    with pytest.raises(ValidationError):
        Retry(duration=5, max_duration=4)
# Passing an empty parameter list for a function that needs args must raise.
def test_func_and_func_param_validation_raises_on_args_not_passed(op):
    with pytest.raises(AssertionError) as e:
        Task('t', op, [])
    assert str(e.value) == 'no parameters passed for function'
# Parameter dicts with differing keys must be rejected as mismatched.
def test_func_and_func_param_validation_raises_on_difference(op):
    with pytest.raises(AssertionError) as e:
        Task('t', op, [{'a': 1}, {'b': 1}])
    assert str(e.value) == 'mismatched function arguments and passed parameters'
# A task without parameters reports an empty parameter list.
def test_param_getter_returns_empty(no_op):
    t = Task('t', no_op)
    assert not t.get_parameters()
# Multiple parameter dicts become a fan-out: values use the {{item.X}} form.
def test_param_getter_parses_on_multi_params(op):
    t = Task('t', op, [{'a': 1}, {'a': 2}, {'a': 3}])
    params = t.get_parameters()
    for p in params:
        assert p.name == 'a'
        assert p.value == '{{item.a}}'
# A single plain value is JSON-serialized into the parameter value.
def test_param_getter_parses_single_param_val_on_json_payload(op):
    t = Task('t', op, [{'a': 1}])
    param = t.get_parameters()[0]
    assert param.name == 'a'
    assert param.value == '1'  # from json.dumps
# A pydantic-style model payload is serialized to its JSON representation.
def test_param_getter_parses_single_param_val_on_base_model_payload(mock_model, op):
    t = Task('t', op, [{'a': mock_model()}])
    param = t.get_parameters()[0]
    assert param.name == 'a'
    assert param.value == '{"field1": 1, "field2": 2}'
# The generated script preamble json.loads()-es each input parameter.
def test_param_script_portion_adds_formatted_json_calls(op):
    t = Task('t', op, [{'a': 1}])
    script = t.get_param_script_portion()
    assert script == 'import json\na = json.loads(\'{{inputs.parameters.a}}\')\n'
# get_script() = json preamble + function body (type hints stripped).
def test_script_getter_returns_expected_string(op, typed_op):
    t = Task('t', op, [{'a': 1}])
    script = t.get_script()
    assert script == 'import json\na = json.loads(\'{{inputs.parameters.a}}\')\n\nprint(a)\n'
    t = Task('t', typed_op, [{'a': 1}])
    script = t.get_script()
    assert script == 'import json\na = json.loads(\'{{inputs.parameters.a}}\')\n\nprint(a)\nreturn [{\'a\': (a, a)}]\n'
# Long multi-line signatures must still produce one loads() line per param.
def test_script_getter_parses_multi_line_function(long_op):
    t = Task(
        't',
        long_op,
        [
            {
                'very_long_parameter_name': 1,
                'very_very_long_parameter_name': 2,
                'very_very_very_long_parameter_name': 3,
                'very_very_very_very_long_parameter_name': 4,
                'very_very_very_very_very_long_parameter_name': 5,
            }
        ],
    )
    expected_script = """import json
very_long_parameter_name = json.loads('{{inputs.parameters.very_long_parameter_name}}')
very_very_long_parameter_name = json.loads('{{inputs.parameters.very_very_long_parameter_name}}')
very_very_very_long_parameter_name = json.loads('{{inputs.parameters.very_very_very_long_parameter_name}}')
very_very_very_very_long_parameter_name = json.loads('{{inputs.parameters.very_very_very_very_long_parameter_name}}')
very_very_very_very_very_long_parameter_name = json.loads('{{inputs.parameters.very_very_very_very_very_long_parameter_name}}')
print(42)
"""
    script = t.get_script()
    assert script == expected_script
# Default Resources() must translate into the expected CPU/memory limits.
def test_resources_returned_with_appropriate_limits(op):
    r = Resources()
    t = Task('t', op, [{'a': 1}], resources=r)
    resources = t.get_resources()
    assert resources.limits['cpu'] == '1'
    assert resources.limits['memory'] == '4Gi'
# GPU counts appear as both requests and limits under nvidia.com/gpu.
def test_resources_returned_with_gpus(op):
    r = Resources(gpus=2)
    t = Task('t', op, [{'a': 1}], resources=r)
    resources = t.get_resources()
    assert resources.requests['nvidia.com/gpu'] == '2'
    assert resources.limits['nvidia.com/gpu'] == '2'
# Fan-out items must serialize nested dicts and model payloads to JSON strings.
def test_parallel_items_assemble_base_models(multi_op, mock_model):
    t = Task(
        't',
        multi_op,
        [
            {'a': 1, 'b': {'d': 2, 'e': 3}, 'c': mock_model()},
            {'a': 1, 'b': {'d': 2, 'e': 3}, 'c': mock_model()},
            {'a': 1, 'b': {'d': 2, 'e': 3}, 'c': mock_model()},
        ],
    )
    items = t.get_parallel_items()
    for item in items:
        assert item['a'] == '1'
        assert item['b'] == '{"d": 2, "e": 3}'
        assert item['c'] == '{"field1": 1, "field2": 2}'
# Each volume flavour mounts at its configured (or default /dev/shm) path.
def test_volume_mounts_returns_expected_volumes(no_op):
    r = Resources(
        volume=Volume(name='v1', size='1Gi', mount_path='/v1'),
        existing_volume=ExistingVolume(name='v2', mount_path='/v2'),
        empty_dir_volume=EmptyDirVolume(name='v3'),
    )
    t = Task('t', no_op, resources=r)
    vs = t.get_volume_mounts()
    assert vs[0].name == 'v1'
    assert vs[0].mount_path == '/v1'
    assert vs[1].name == 'v2'
    assert vs[1].mount_path == '/v2'
    assert vs[2].name == 'v3'
    assert vs[2].mount_path == '/dev/shm'
# The canned GPU toleration carries the expected key/effect/operator/value.
def test_gpu_toleration_returns_expected_toleration():
    tn = GPUToleration
    assert tn.key == 'nvidia.com/gpu'
    assert tn.effect == 'NoSchedule'
    assert tn.operator == 'Equal'
    assert tn.value == 'present'
# Script tasks always execute via the python interpreter.
def test_task_command_parses(mock_model, op):
    t = Task('t', op, [{'a': mock_model()}])
    assert t.get_command() == ['python']
# Multiple parameter dicts produce a with_items fan-out in the task spec.
def test_task_spec_returns_with_parallel_items(op):
    t = Task('t', op, [{'a': 1}, {'a': 1}, {'a': 1}])
    s = t.get_task_spec()
    items = [{'a': '1'}, {'a': '1'}, {'a': '1'}]
    assert s.name == 't'
    assert s.template == 't'
    assert len(s.arguments.parameters) == 1
    assert len(s.with_items) == 3
    assert s.with_items == items
# A single parameter dict produces plain argument values, no fan-out.
def test_task_spec_returns_with_single_values(op):
    t = Task('t', op, [{'a': 1}])
    s = t.get_task_spec()
    assert s.name == 't'
    assert s.template == 't'
    assert len(s.arguments.parameters) == 1
    assert s.arguments.parameters[0].name == 'a'
    assert s.arguments.parameters[0].value == '1'
# Without GPUs/tolerations/retry, the template must omit those fields.
def test_task_template_does_not_contain_gpu_references(op):
    t = Task('t', op, [{'a': 1}], resources=Resources())
    tt = t.get_task_template()
    assert isinstance(tt.name, str)
    assert isinstance(tt.script.source, str)
    assert isinstance(tt.arguments, V1alpha1Arguments)
    assert isinstance(tt.inputs, V1alpha1Inputs)
    assert tt.node_selector is None
    assert tt.tolerations is None
    assert tt.retry_strategy is None
# With GPUs, tolerations, node selectors and retry, every field is populated
# with the expected types and values.
def test_task_template_contains_expected_field_values_and_types(op):
    t = Task(
        't',
        op,
        [{'a': 1}],
        resources=Resources(gpus=1),
        tolerations=[GPUToleration],
        node_selectors={'abc': '123-gpu'},
        retry=Retry(duration=1, max_duration=2),
    )
    tt = t.get_task_template()
    assert isinstance(tt.name, str)
    assert isinstance(tt.script.source, str)
    assert isinstance(tt.arguments, V1alpha1Arguments)
    assert isinstance(tt.inputs, V1alpha1Inputs)
    assert isinstance(tt.node_selector, dict)
    assert isinstance(tt.tolerations, list)
    assert all([isinstance(x, V1Toleration) for x in tt.tolerations])
    assert tt.name == 't'
    assert tt.script.source == 'import json\na = json.loads(\'{{inputs.parameters.a}}\')\n\nprint(a)\n'
    assert tt.arguments.parameters[0].name == 'a'
    assert tt.inputs.parameters[0].name == 'a'
    assert len(tt.tolerations) == 1
    assert tt.tolerations[0].key == 'nvidia.com/gpu'
    assert tt.tolerations[0].effect == 'NoSchedule'
    assert tt.tolerations[0].operator == 'Equal'
    assert tt.tolerations[0].value == 'present'
    assert tt.retry_strategy is not None
    assert tt.retry_strategy.backoff.duration == '1'
    assert tt.retry_strategy.backoff.max_duration == '2'
# Retry settings must round-trip consistently between the task, its
# template, and the standalone retry strategy.
def test_task_template_contains_expected_retry_strategy(no_op):
    r = Retry(duration=3, max_duration=9)
    t = Task('t', no_op, retry=r)
    assert t.retry.duration == 3
    assert t.retry.max_duration == 9
    tt = t.get_task_template()
    tr = t.get_retry_strategy()
    template_backoff = tt.retry_strategy.backoff
    retry_backoff = tr.backoff
    assert int(template_backoff.duration) == int(retry_backoff.duration)
    assert int(template_backoff.max_duration) == int(retry_backoff.max_duration)
# No retry configured -> no retry strategy.
def test_task_get_retry_returns_expected_none(no_op):
    t = Task('t', no_op)
    tr = t.get_retry_strategy()
    assert tr is None
# An explicitly passed parameter overrides the function's keyword default.
def test_task_sets_user_kwarg_override(kwarg_op):
    t = Task('t', kwarg_op, [{'a': 43}])
    assert t.parameters[0].name == 'a'
    assert t.parameters[0].value == '43'
# Keyword defaults are used when not overridden; overrides mix with defaults.
def test_task_sets_kwarg(kwarg_op, kwarg_multi_op):
    t = Task('t', kwarg_op)
    assert t.parameters[0].name == 'a'
    assert t.parameters[0].value == '42'
    t = Task('t', kwarg_multi_op, [{'a': 50}])
    assert t.parameters[0].name == 'a'
    assert t.parameters[0].value == '50'
    assert t.parameters[1].name == 'b'
    assert t.parameters[1].value == '43'
# Duplicate input artifact names must be rejected.
def test_task_fails_artifact_validation(no_op, in_artifact):
    with pytest.raises(AssertionError) as e:
        Task('t', no_op, input_artifacts=[in_artifact, in_artifact])
    assert str(e.value) == 'input artifact names must be unique'
# InputFrom and input artifacts are mutually exclusive.
def test_task_validation_fails_on_input_from_plus_input_artifact(op, in_artifact):
    with pytest.raises(AssertionError) as e:
        Task('t', op, input_from=InputFrom(name='test', parameters=['a']), input_artifacts=[in_artifact])
    assert str(e.value) == 'cannot supply both InputFrom and Artifacts'
# Input artifacts surface on task inputs with name/path preserved.
def test_task_input_artifact_returns_expected_list(no_op, out_artifact, in_artifact):
    t = Task('t', no_op, input_artifacts=[in_artifact])
    artifact = t.inputs.artifacts[0]
    assert artifact._from is None
    assert artifact.name == in_artifact.name
    assert artifact.path == in_artifact.path
# Output artifacts surface on task outputs with name/path preserved.
def test_task_output_artifact_returns_expected_list(no_op, out_artifact):
    t = Task('t', no_op, output_artifacts=[out_artifact])
    artifact = t.outputs.artifacts[0]
    assert artifact.name == out_artifact.name
    assert artifact.path == out_artifact.path
|
# NOTE(review): auto-generated IronPython stub for the Revit API type
# TessellatedShapeBuilderResult; method bodies are intentionally `pass` --
# do not add logic here.
class TessellatedShapeBuilderResult(object,IDisposable):
    """
    Describes what TessellatedShapeBuilder has
    construct.
    """
    def Dispose(self):
        """ Dispose(self: TessellatedShapeBuilderResult) """
        pass
    def GetGeometricalObjects(self):
        """
        GetGeometricalObjects(self: TessellatedShapeBuilderResult) -> IList[GeometryObject]

        When called the first time,returns geometrical objects which were built.
        Later calls will throw exceptions.
        Returns: Geometrical object which were built.
        """
        pass
    def GetIssuesForFaceSet(self,setIndex):
        """
        GetIssuesForFaceSet(self: TessellatedShapeBuilderResult,setIndex: int) -> IList[TessellatedBuildIssue]

        Returns the array of issues encountered while processing
        a face set with
        index 'setIndex'.

        setIndex: Index of the face set.
        Returns: Array of issues encountered while processing a face set
        with index
        'setIndex'.
        """
        pass
    def GetNumberOfFaceSets(self):
        """
        GetNumberOfFaceSets(self: TessellatedShapeBuilderResult) -> int

        Gets number of face sets for which 'this' result was obtained.
        Returns: The number of face sets.
        """
        pass
    def ReleaseUnmanagedResources(self,*args):
        """ ReleaseUnmanagedResources(self: TessellatedShapeBuilderResult,disposing: bool) """
        pass
    def __enter__(self,*args):
        """ __enter__(self: IDisposable) -> object """
        pass
    def __exit__(self,*args):
        """ __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __repr__(self,*args):
        """ __repr__(self: object) -> str """
        pass
    AreObjectsAvailable=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Shows whether 'issues' still contains the original data or whether
    these data have already been relinquished by 'getGeometricalObjects'.
    The former is true,the later is false.

    Get: AreObjectsAvailable(self: TessellatedShapeBuilderResult) -> bool
    """
    HasInvalidData=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Whether there were any inconsistencies in the face sets,
    stored in the tessellated shape builder while building
    geometrical objects.

    Get: HasInvalidData(self: TessellatedShapeBuilderResult) -> bool
    """
    IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Specifies whether the .NET object represents a valid Revit entity.

    Get: IsValidObject(self: TessellatedShapeBuilderResult) -> bool
    """
    Outcome=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """What kinds of geometrical objects were built.

    Get: Outcome(self: TessellatedShapeBuilderResult) -> TessellatedShapeBuilderOutcome
    """
|
from __future__ import annotations
import typing
import dataclasses
@dataclasses.dataclass
class Node(): ...  # placeholder: graph nodes currently carry no data
@dataclasses.dataclass
class Edge():
    """A directed edge from node `from_` to node `to`, with optional
    weight and capacity annotations."""
    from_: int
    to: int
    # BUG FIX: a stray trailing comma previously made this default the
    # one-element tuple (None,) instead of None.
    weight: typing.Optional[int] = None
    capacity: typing.Optional[int] = None
@dataclasses.dataclass
class Graph():
    """Adjacency-list digraph: edges[i] holds the edges leaving node i."""
    nodes: typing.List[Node]
    edges: typing.List[typing.List[Edge]]

    @classmethod
    def from_size(
        cls,
        n: int,
    ) -> Graph:
        """Create a graph with n fresh nodes and no edges."""
        return cls(
            [Node() for _ in range(n)],
            [[] for _ in range(n)],
        )

    def add_edge(
        self,
        e: Edge,
    ) -> typing.NoReturn:
        """Attach e to the adjacency list of its source node."""
        self.edges[e.from_].append(e)

    def add_edges(
        self,
        edges: typing.List[Edge],
    ) -> typing.NoReturn:
        """Attach every edge in *edges*."""
        for edge in edges:
            self.add_edge(edge)

    @property
    def size(self) -> int:
        """Number of nodes in the graph."""
        return len(self.nodes)
class ShortestDistDesopoPape():
    """Single-source shortest distances via the D'Esopo-Pape algorithm.

    state[v] tracks queue status: -1 never queued, 0 previously dequeued,
    1 currently queued.  Assumes edge weights are set and no negative
    cycle is reachable from the source.
    """

    def __call__(
        self,
        g: Graph,
        src: int,
    ) -> typing.List[int]:
        import collections
        inf = float('inf')
        node_count = g.size
        dist = [inf] * node_count
        dist[src] = 0
        dq = collections.deque([src])
        state = [-1] * node_count
        while dq:
            u = dq.popleft()
            state[u] = 0
            for e in g.edges[u]:
                v = e.to
                candidate = dist[u] + e.weight
                if candidate >= dist[v]:
                    continue
                dist[v] = candidate
                if state[v] == 1:
                    continue
                # Pape's rule: re-examined nodes jump the queue.
                if state[v] == -1:
                    dq.append(v)
                else:
                    dq.appendleft(v)
                state[v] = 1
        return dist
def solve(
    n: int,
    uvc: typing.Iterator[typing.Tuple[int]],
) -> typing.NoReturn:
    """Build a directed graph from (u, v, c) weighted edge triples, sum
    the shortest-path distances from every node, and print the total."""
    g = Graph.from_size(n)
    for u, v, c in uvc:
        g.add_edge(Edge(u, v, c))
    desopo_pape = ShortestDistDesopoPape()
    total = 0
    for src in range(n):
        total += sum(desopo_pape(g, src))
    print(total)
import sys
def main() -> typing.NoReturn:
    # Input format: first line "n m"; then m lines of "u v c" edge triples.
    n, m = map(int, input().split())
    uvc = map(int, sys.stdin.read().split())
    # Group the flat token stream into consecutive (u, v, c) triples.
    uvc = zip(*[uvc] * 3)
    solve(n, uvc)
main()
from time import time
from datetime import datetime
from random import getrandbits
def xFactorOfY(x, y):
    """Return True when x divides y evenly."""
    return not y % x
def countUpperAndLower(string):
    """Return (uppercase_count, lowercase_count), counting ASCII letters only."""
    upper = sum(1 for char in string if 'A' <= char <= 'Z')
    lower = sum(1 for char in string if 'a' <= char <= 'z')
    return (upper, lower)
def reverse(string):
    """Return the string with its characters in reverse order."""
    return "".join(reversed(string))
def isPrimeEfficient(x):
    """Primality test by trial division up to sqrt(x); False for x < 2."""
    if x < 2:
        return False
    return all(x % d for d in range(2, int(x ** 0.5) + 1))
def isPrime(x):
    """Primality test by trial division up to x/2.

    BUG FIX: values below 2 (zero, negatives) previously fell straight
    through the empty trial-division range and were reported prime; now
    anything < 2 is rejected.
    """
    if x < 2:
        return False
    for y in range(2, int(x/2)+1):
        if x % y == 0:
            return False
    return True
def isPrimeInefficient(x):
    """Primality test by full trial division (deliberately slow variant).

    BUG FIX: values below 2 previously fell straight through the empty
    trial-division range and were reported prime; now anything < 2 is
    rejected.
    """
    if x < 2:
        return False
    for y in range(2, x):
        if x % y == 0:
            return False
    return True
def getFactors(x):
    """Return the proper divisors of x (all divisors from 1 up to x/2)."""
    upper = int(x / 2) + 1
    return [d for d in range(1, upper) if x % d == 0]
def isPerfect(x):
    """Return True when x equals the sum of its proper divisors."""
    return x == sum(getFactors(x))
def generatePrime():
    """Return a random prime of about 128 bits.

    Starts from a random 128-bit value, rounds up to odd, then scans
    upward over odd candidates until one passes the primality test.
    """
    candidate = getrandbits(128)
    if candidate % 2 == 0:
        candidate += 1
    while not isPrimeEfficient(candidate):
        candidate += 2
    return candidate
|
#This code contain transformet BERT code from https://github.com/google-research/bert, please see LICENSE-BERT
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
import math
import six
import numpy as np
def dropout(input_tensor, dropout_prob):
    """Perform dropout.

    Args:
        input_tensor: float Tensor.
        dropout_prob: Python float. The probability of dropping out a value
            (NOT of *keeping* a dimension as in `tf.nn.dropout`).

    Returns:
        A version of `input_tensor` with dropout applied.
    """
    if dropout_prob is None or dropout_prob == 0.0:
        return input_tensor
    # TF1-style tf.nn.dropout takes keep_prob, hence 1.0 - dropout_prob.
    return tf.nn.dropout(input_tensor, 1.0 - dropout_prob)
def reshape_to_matrix(input_tensor):
    """Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
    ndims = input_tensor.shape.ndims
    if ndims < 2:
        raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
                         (input_tensor.shape))
    if ndims == 2:
        return input_tensor
    # Collapse all leading dimensions into one, keeping the last (width).
    return tf.reshape(input_tensor, [-1, input_tensor.shape[-1]])
def reshape_from_matrix(output_tensor, orig_shape_list):
    """Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
    if len(orig_shape_list) == 2:
        return output_tensor
    # Restore the original leading dims; the trailing width comes from the
    # (possibly transformed) matrix itself rather than the original shape.
    width = get_shape_list(output_tensor)[-1]
    return tf.reshape(output_tensor, orig_shape_list[0:-1] + [width])
def get_shape_list(tensor, expected_rank=None, name=None):
    """Return the shape of `tensor` as a list, preferring static dimensions.

    Args:
      tensor: A tf.Tensor object to find the shape of.
      expected_rank: (optional) int or list of ints. If given, an exception
        is raised when the tensor's rank does not match.
      name: Optional name of the tensor for error messages.

    Returns:
      A list with one entry per dimension: python ints for statically known
      dimensions, scalar tf.Tensors for dynamic ones.
    """
    if name is None:
        name = tensor.name
    if expected_rank is not None:
        assert_rank(tensor, expected_rank, name)
    shape = tensor.shape.as_list()
    # Indexes whose size is unknown at graph-construction time.
    dynamic_indexes = [index for index, dim in enumerate(shape) if dim is None]
    if not dynamic_indexes:
        return shape
    # Patch the unknown entries with runtime scalars from tf.shape.
    dynamic_shape = tf.shape(tensor)
    for index in dynamic_indexes:
        shape[index] = dynamic_shape[index]
    return shape
def assert_rank(tensor, expected_rank, name=None):
    """Raise ValueError unless the tensor's static rank matches `expected_rank`.

    Args:
      tensor: A tf.Tensor to check the rank of.
      expected_rank: Python integer or list of integers, the allowed rank(s).
      name: Optional name of the tensor for the error message.

    Raises:
      ValueError: If the actual rank is not among the expected ranks.
    """
    if name is None:
        name = tensor.name
    if isinstance(expected_rank, six.integer_types):
        allowed_ranks = {expected_rank}
    else:
        allowed_ranks = set(expected_rank)
    actual_rank = tensor.shape.ndims
    if actual_rank not in allowed_ranks:
        scope_name = tf.get_variable_scope().name
        raise ValueError(
            "For the tensor `%s` in scope `%s`, the actual rank "
            "`%d` (shape = %s) is not equal to the expected rank `%s`" %
            (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
def create_attention_mask_from_input_mask(from_tensor, to_mask):
    """Build a 3D attention mask from a 2D padding mask.

    Args:
      from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
      to_mask: int32 Tensor of shape [batch_size, to_seq_length].

    Returns:
      float Tensor of shape [batch_size, from_seq_length, to_seq_length].
    """
    from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
    batch_size = from_shape[0]
    from_seq_length = from_shape[1]
    to_shape = get_shape_list(to_mask, expected_rank=2)
    to_seq_length = to_shape[1]
    # [batch_size, 1, to_seq_length], cast to float so it can scale the ones.
    to_mask_float = tf.cast(
        tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)
    # Attending *from* padding positions is harmless; only attending *to*
    # padding must be blocked, so the from-side factor is simply all ones.
    # `broadcast_ones` = [batch_size, from_seq_length, 1]
    broadcast_ones = tf.ones(
        shape=[batch_size, from_seq_length, 1], dtype=tf.float32)
    # Broadcasting the two factors yields the full [B, F, T] mask.
    return broadcast_ones * to_mask_float
def attention_layer(from_tensor,
                    to_tensor,
                    attention_mask=None,
                    num_attention_heads=1,
                    size_per_head=512,
                    query_act=None,
                    key_act=None,
                    value_act=None,
                    attention_probs_dropout_prob=0.0,
                    initializer_range=0.02,
                    do_return_2d_tensor=False,
                    batch_size=None,
                    from_seq_length=None,
                    to_seq_length=None):
    """Performs multi-headed attention from `from_tensor` to `to_tensor`.

    This is an implementation of multi-headed attention based on "Attention
    is all you Need". If `from_tensor` and `to_tensor` are the same, then
    this is self-attention. Each timestep in `from_tensor` attends to the
    corresponding sequence in `to_tensor`, and returns a fixed-width vector.

    This function first projects `from_tensor` into a "query" tensor and
    `to_tensor` into "key" and "value" tensors. These are (effectively) a list
    of tensors of length `num_attention_heads`, where each tensor is of shape
    [batch_size, seq_length, size_per_head].

    Then, the query and key tensors are dot-producted and scaled. These are
    softmaxed to obtain attention probabilities. The value tensors are then
    interpolated by these probabilities, then concatenated back to a single
    tensor and returned.

    In practice, the multi-headed attention are done with transposes and
    reshapes rather than actual separate tensors.

    Args:
      from_tensor: float Tensor of shape [batch_size, from_seq_length,
        from_width].
      to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
      attention_mask: (optional) int32 Tensor of shape [batch_size,
        from_seq_length, to_seq_length]. The values should be 1 or 0. The
        attention scores will effectively be set to -infinity for any positions in
        the mask that are 0, and will be unchanged for positions that are 1.
      num_attention_heads: int. Number of attention heads.
      size_per_head: int. Size of each attention head.
      query_act: (optional) Activation function for the query transform.
      key_act: (optional) Activation function for the key transform.
      value_act: (optional) Activation function for the value transform.
      attention_probs_dropout_prob: (optional) float. Dropout probability of the
        attention probabilities.
      initializer_range: float. Range of the weight initializer.
      do_return_2d_tensor: bool. If True, the output will be of shape [batch_size
        * from_seq_length, num_attention_heads * size_per_head]. If False, the
        output will be of shape [batch_size, from_seq_length, num_attention_heads
        * size_per_head].
      batch_size: (Optional) int. If the input is 2D, this might be the batch size
        of the 3D version of the `from_tensor` and `to_tensor`.
      from_seq_length: (Optional) If the input is 2D, this might be the seq length
        of the 3D version of the `from_tensor`.
      to_seq_length: (Optional) If the input is 2D, this might be the seq length
        of the 3D version of the `to_tensor`.

    Returns:
      float Tensor of shape [batch_size, from_seq_length,
        num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is
        true, this will be of shape [batch_size * from_seq_length,
        num_attention_heads * size_per_head]).

    Raises:
      ValueError: Any of the arguments or tensor shapes are invalid.
    """
    def transpose_for_scores(input_tensor, batch_size, num_attention_heads,
                             seq_length, width):
        # [B*S, N*H] -> [B, S, N, H] -> [B, N, S, H]
        output_tensor = tf.reshape(
            input_tensor, [batch_size, seq_length, num_attention_heads, width])
        output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
        return output_tensor

    from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
    to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])
    if len(from_shape) != len(to_shape):
        raise ValueError(
            "The rank of `from_tensor` must match the rank of `to_tensor`.")
    if len(from_shape) == 3:
        # 3D input: dimensions can be read off the tensors directly.
        batch_size = from_shape[0]
        from_seq_length = from_shape[1]
        to_seq_length = to_shape[1]
    elif len(from_shape) == 2:
        # 2D (already flattened) input: the caller must supply the dimensions.
        if (batch_size is None or from_seq_length is None or to_seq_length is None):
            raise ValueError(
                "When passing in rank 2 tensors to attention_layer, the values "
                "for `batch_size`, `from_seq_length`, and `to_seq_length` "
                "must all be specified.")

    # Scalar dimensions referenced here:
    #   B = batch size (number of sequences)
    #   F = `from_tensor` sequence length
    #   T = `to_tensor` sequence length
    #   N = `num_attention_heads`
    #   H = `size_per_head`
    from_tensor_2d = reshape_to_matrix(from_tensor)
    to_tensor_2d = reshape_to_matrix(to_tensor)

    # `query_layer` = [B*F, N*H]
    query_layer = tf.layers.dense(
        from_tensor_2d,
        num_attention_heads * size_per_head,
        activation=query_act,
        name="query",
        kernel_initializer=create_initializer(initializer_range))

    # `key_layer` = [B*T, N*H]
    key_layer = tf.layers.dense(
        to_tensor_2d,
        num_attention_heads * size_per_head,
        activation=key_act,
        name="key",
        kernel_initializer=create_initializer(initializer_range))

    # `value_layer` = [B*T, N*H]
    value_layer = tf.layers.dense(
        to_tensor_2d,
        num_attention_heads * size_per_head,
        activation=value_act,
        name="value",
        kernel_initializer=create_initializer(initializer_range))

    # `query_layer` = [B, N, F, H]
    query_layer = transpose_for_scores(query_layer, batch_size,
                                       num_attention_heads, from_seq_length,
                                       size_per_head)
    # `key_layer` = [B, N, T, H]
    key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,
                                     to_seq_length, size_per_head)

    # Take the dot product between "query" and "key" to get the raw
    # attention scores, scaled by 1/sqrt(H) as in the Transformer paper.
    # `attention_scores` = [B, N, F, T]
    attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
    attention_scores = tf.multiply(attention_scores,
                                   1.0 / math.sqrt(float(size_per_head)))

    if attention_mask is not None:
        # `attention_mask` = [B, 1, F, T]
        attention_mask = tf.expand_dims(attention_mask, axis=[1])
        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        attention_scores += adder

    # Normalize the attention scores to probabilities.
    # `attention_probs` = [B, N, F, T]
    attention_probs = tf.nn.softmax(attention_scores)

    # This is actually dropping out entire tokens to attend to, which might
    # seem a bit unusual, but is taken from the original Transformer paper.
    attention_probs = dropout(attention_probs, attention_probs_dropout_prob)

    # `value_layer` = [B, T, N, H]
    value_layer = tf.reshape(
        value_layer,
        [batch_size, to_seq_length, num_attention_heads, size_per_head])
    # `value_layer` = [B, N, T, H]
    value_layer = tf.transpose(value_layer, [0, 2, 1, 3])

    # `context_layer` = [B, N, F, H]
    context_layer = tf.matmul(attention_probs, value_layer)
    # `context_layer` = [B, F, N, H]
    context_layer = tf.transpose(context_layer, [0, 2, 1, 3])

    if do_return_2d_tensor:
        # `context_layer` = [B*F, N*H]
        context_layer = tf.reshape(
            context_layer,
            [batch_size * from_seq_length, num_attention_heads * size_per_head])
    else:
        # `context_layer` = [B, F, N*H]
        context_layer = tf.reshape(
            context_layer,
            [batch_size, from_seq_length, num_attention_heads * size_per_head])
    return context_layer
def create_initializer(initializer_range=0.02):
    """Create a truncated-normal initializer with the given stddev range."""
    stddev = initializer_range
    return tf.compat.v1.truncated_normal_initializer(stddev=stddev)
def conv1d_layer(inputs, filter_width, in_channels, out_channels, padding, activation, initializer, trainable=True, name="conv"):
    """1-D convolution (stride 1) followed by a bias add and an activation.

    Args:
      inputs: float Tensor [batch, length, in_channels].
      filter_width: int, kernel width of the convolution.
      in_channels: int, input channel count.
      out_channels: int, output channel count.
      padding: "SAME" or "VALID", passed to tf.nn.conv1d.
      activation: callable applied to the biased convolution output; it must
        accept a `name` keyword argument (e.g. tf.nn.relu).
      initializer: initializer for the convolution kernel.
      trainable: whether the kernel and bias variables are trainable.
      name: variable scope name for the layer.

    Returns:
      float Tensor [batch, length', out_channels] after the activation.
    """
    with tf.compat.v1.variable_scope(name):
        # Renamed local from `filter` to `conv_filter`: it shadowed the Python
        # builtin. The tf variable name 'filter' is kept so checkpoints match.
        conv_filter = tf.compat.v1.get_variable(initializer=initializer, shape=[filter_width, in_channels, out_channels], trainable=trainable, name='filter')
        conv = tf.nn.conv1d(inputs, conv_filter, [1], padding=padding, name="conv")
        bias = tf.compat.v1.get_variable(initializer=tf.zeros_initializer, shape=[out_channels], trainable=trainable, name='bias')
        conv_bias = tf.nn.bias_add(conv, bias, name='conv_bias')
        conv_bias_relu = activation(conv_bias, name='conv_bias_relu')
        return conv_bias_relu
def dense_layer(input_tensor, hidden_size, activation, initializer, name="dense"):
    """Fully connected layer applied over the last axis of a rank-3 tensor.

    Flattens [batch, seq, width] to [batch*seq, width], computes x*W + b with
    an optional activation, and restores the [batch, seq, hidden_size] shape.
    """
    with tf.compat.v1.variable_scope(name):
        shape = get_shape_list(input_tensor, expected_rank=3)
        batch_size = shape[0]
        seq_length = shape[1]
        input_width = shape[2]
        flat_input = tf.reshape(input_tensor, [-1, input_width])
        weights = tf.compat.v1.get_variable(initializer=initializer, shape=[input_width, hidden_size], name="w")
        projected = tf.matmul(flat_input, weights, transpose_b=False)
        bias = tf.compat.v1.get_variable(initializer=tf.zeros_initializer, shape=[hidden_size], name="b")
        outputs = tf.nn.bias_add(projected, bias)
        if activation:
            outputs = activation(outputs)
        return tf.reshape(outputs, [batch_size, seq_length, hidden_size])
def layer_norm(input_tensor, trainable=True, name=None):
    """Run layer normalization on the last dimension of the tensor."""
    # NOTE(review): tf.contrib only exists in TF1. The commented Keras call
    # below looks like a candidate TF2 replacement, but it hard-codes
    # epsilon=1e-14 which may differ from the contrib default — confirm
    # numerical equivalence before switching.
    #return tf.keras.layers.LayerNormalization(name=name,trainable=trainable,axis=-1,epsilon=1e-14,dtype=tf.float32)(input_tensor)
    return tf.contrib.layers.layer_norm(
        inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, trainable=trainable, scope=name)
def mix_units(input_tensor, num_units, hidden_size, initializer, dropout_prob=0.1):
    """Stack of linear units forming the mix-density network trunk.

    Each unit is a linear layer followed by layer normalization, ReLU and
    dropout. (Per the AlignTTS design this tail is applied to every layer but
    the last; the final projection lives in the caller.)

    Args:
      input_tensor: float Tensor [batch, seq, width].
      num_units: int, number of stacked units.
      hidden_size: int, output width of every unit.
      initializer: weight initializer passed to dense_layer.
      dropout_prob: float, dropout probability applied after each ReLU.

    Returns:
      float Tensor [batch, seq, hidden_size].
    """
    layer_output = input_tensor
    for i in range(num_units):
        # Use tf.compat.v1.variable_scope explicitly, consistent with the rest
        # of this file (the original used bare tf.variable_scope, which does
        # not exist in TF2).
        with tf.compat.v1.variable_scope("unit_index_%d" % i):
            layer_output = dense_layer(layer_output, hidden_size, activation=None, initializer=initializer)
            layer_output = layer_norm(layer_output)
            layer_output = tf.nn.relu(layer_output)
            layer_output = dropout(layer_output, dropout_prob)
    return layer_output
def pdf(data, mu, var):
    """Diagonal-Gaussian likelihood of `data` under N(mu, var).

    Computes the per-dimension normal density, takes the product over the
    last axis in log space, and clips aggressively because the densities can
    under/overflow float32 (values around 1e-42 were observed, per the
    original comment).

    Args:
      data: float Tensor; observed values, last axis is the feature axis.
      mu: float Tensor; means, broadcast-compatible with `data`.
      var: float Tensor; variances (not stddevs), same shape as `mu`.

    Returns:
      float Tensor with the last axis reduced: the clipped joint density.
    """
    # The redundant function-local `import math` was removed: math is already
    # imported at module level.
    pi = tf.constant(math.pi, dtype=tf.float32)
    epsilon = tf.constant(1e-14, dtype=tf.float32)
    #p get sometimes 1e-42 (>max float32)
    p = tf.math.exp(-tf.math.pow(data - mu, 2, name='p2') / (2.0 * (var + epsilon))) / tf.math.sqrt(2.0 * pi * (var + epsilon), name='p1')
    p2 = tf.reduce_sum(tf.math.log(p + tf.constant(1e-35, dtype=tf.float32)), axis=-1, keepdims=False)
    p2 = tf.clip_by_value(p2, tf.constant(math.log(1e-35), dtype=tf.float32), tf.constant(math.log(1e+35), dtype=tf.float32))
    p2 = tf.math.exp(p2)
    return tf.clip_by_value(p2, tf.constant(1e-35, dtype=tf.float32), tf.constant(1e+35, dtype=tf.float32))
def sentence_probabilities(mu, var, m, y, n, b, size_m, size_n, max_mel_length):
    """Emission probabilities for batch element `b`, zero-padded to full size.

    For every mel frame t < n[b], evaluates pdf() of frame y[b, t] against all
    m[b] per-character Gaussians, giving an [n[b], m[b]] probability table,
    then pads it with zeros up to [size_n, size_m].

    Args:
      mu, var: float Tensors [B, max_mix_length, mels], Gaussian parameters.
      m: int Tensor [B], actual character counts per sentence.
      y: float Tensor [B, max_mel_length, mels], mel spectrograms.
      n: int Tensor [B], actual frame counts per spectrogram.
      b: scalar int Tensor, index of the batch element to process.
      size_m, size_n: scalar int Tensors, target padded sizes.
      max_mel_length: int, upper bound for the frame loop.

    Returns:
      float Tensor [size_n, size_m] of emission probabilities.
    """
    with tf.compat.v1.variable_scope("sentence_probabilities"):
        #calculate all probabilities for sentence
        t = tf.constant(0)  # frame index into the mel spectrogram
        probabilities = tf.TensorArray(size=n[b], dtype=tf.float32, name="probabilities")
        # For each frame t, tile the frame across all m[b] characters and
        # evaluate every character's Gaussian against it in one pdf() call.
        _, probabilities_last = tf.while_loop(lambda t, probabilities: tf.less(t, n[b]), lambda t, probabilities: [t + 1, probabilities.write(t, pdf(tf.tile(tf.expand_dims(y[b,t], 0), [m[b], 1]), mu[b,:m[b]], var[b,:m[b]]))], [t, probabilities], maximum_iterations=max_mel_length, name="mel_loop")
        # Pad the frame axis up to size_n with zero rows when needed.
        extended_n = tf.cond(size_n > n[b],
            lambda: tf.concat([tf.reshape(probabilities_last.stack(), [n[b], m[b]]), tf.zeros([size_n - n[b], m[b]], tf.float32)], axis=0),
            lambda: tf.reshape(probabilities_last.stack(), [n[b], m[b]]))
        # Pad the character axis up to size_m with zero columns when needed.
        return tf.cond(size_m > m[b],
            lambda: tf.concat([extended_n, tf.zeros([size_n, size_m - m[b]], tf.float32)], axis=1),
            lambda: extended_n)
def calculate_alpha(mu, var, m, y, n):
    """Scaled forward-algorithm pass aligning mel frames to characters.

    For each batch element, runs a forward recursion over the emission
    probabilities from sentence_probabilities(): each new row mixes
    "stay on this character" and "advance one character" paths, rescaling
    every step (accumulating log scale factors in `c`) to avoid float
    underflow. All recursion math is done in float64 with hard clipping at
    1e+/-300 for the same reason.

    Args:
      mu/var: mean/covariance created by the neural model, [B, F, mels],
        padded to max size; only the first m[b] rows are meaningful.
      m: int Tensor [B], actual input sentence lengths (s in the paper).
      y: float Tensor [B, max_mel_length, mels], mel spectrograms from audio.
      n: int Tensor [B], actual mel spectrogram lengths (t in the paper).

    Returns:
      A tuple (log_alpha, losses):
        log_alpha: [B, max_mel_length, max_mix_length] log forward values.
        losses: [B, 1] per-example alignment loss, -log_alpha at the final
          (frame, character) cell.
    """
    mel_shape = get_shape_list(y, expected_rank=3)
    batch_size = mel_shape[0]
    max_mel_length = mel_shape[1]
    mel_width = mel_shape[2]
    mix_shape = get_shape_list(mu, expected_rank=3)
    max_mix_length = mix_shape[1]
    mix_width = mix_shape[2]
    # The Gaussians and the spectrogram must share the mel-bin count.
    assert mel_width == mix_width
    #mu: [B, F, 80]
    #var: [B, F=max_mix_length, 80]
    #m: [B]
    #y: [B, max_mel_length, 80]
    #n: [B]
    with tf.compat.v1.variable_scope("alignment_loss"):
        v_b = tf.constant(0)
        v_batch_log_alpha = tf.TensorArray(size=batch_size, dtype=tf.float32, name="batch_log_alpha")
        v_losses = tf.TensorArray(size=batch_size, dtype=tf.float32, name="losses")
        def batch_body(b, batch_log_alpha, losses):
            # float64 throughout the recursion to postpone underflow.
            probabilities = tf.cast(sentence_probabilities(mu, var, m, y, n, b, m[b], n[b], max_mel_length), tf.float64)
            # Initialization: all probability mass on (frame 0, character 0).
            pa = tf.concat([[probabilities[0, 0]], tf.zeros([m[b]-1], tf.float64)], axis=0)
            scaler = 1 / (tf.reduce_sum(pa) + tf.constant(1e-300, dtype=tf.float64))
            prev_alpha = pa * scaler
            # `c` accumulates the log of all scale factors applied so far.
            c = tf.log(scaler)
            prev_alpha = tf.clip_by_value(prev_alpha, tf.constant(1e-300, dtype=tf.float64), tf.constant(1e+300, dtype=tf.float64))
            sentence_alpha = tf.TensorArray(size=n[b], dtype=tf.float64, name="sentence_alpha")
            sentence_alpha = sentence_alpha.write(0, prev_alpha)
            sentence_c = tf.TensorArray(size=n[b], dtype=tf.float64, name="sentence_c")
            sentence_c = sentence_c.write(0, c)
            v_t = tf.constant(1) #first t row calculated already
            #range: up to 1024 (we have 1024 frame in y spectrograms)
            def mel_body(t, c, prev_alpha, sentence_alpha, sentence_c): #go by mel spectrogram (t,n,y)
                # Characters > 0 can be reached by staying (prev_alpha[1:]) or
                # by advancing from the previous character (prev_alpha[:-1]).
                n_a = (prev_alpha[1:]+prev_alpha[:-1])*probabilities[t, 1:]
                # Character 0 can only be reached by staying on it.
                new_a = tf.concat([[prev_alpha[0]*(probabilities[t, 0])], n_a], axis=0)
                scaler = 1 / (tf.reduce_sum(new_a) + tf.constant(1e-300, dtype=tf.float64))
                new_a = new_a * scaler
                c = c + tf.log(scaler)
                new_a = tf.clip_by_value(new_a, tf.constant(1e-300, dtype=tf.float64), tf.constant(1e+300, dtype=tf.float64))
                return [t + 1, c, tf.reshape(new_a, [m[b]]), sentence_alpha.write(t, new_a), sentence_c.write(t, c)]
            t_last, c_out, alpha_l, sentence_alpha_out, sentence_c_out = tf.while_loop(lambda v_t, c, prev_alpha, sentence_alpha, sentence_c: tf.less(v_t, n[b]), mel_body, [v_t, c, prev_alpha, sentence_alpha, sentence_c], name="mel_loop")
            # Undo the per-row scaling in log space: log(alpha) - c.
            sentence_log_alpha_tensor = tf.cast(tf.math.log(sentence_alpha_out.stack()), dtype=tf.float32) - tf.reshape(tf.cast(sentence_c_out.stack(), dtype=tf.float32), [-1, 1])
            # Zero-pad frames up to max_mel_length ...
            extended_log_alpha_n = tf.cond(max_mel_length > n[b],
                lambda: tf.concat([tf.reshape(sentence_log_alpha_tensor, [n[b], m[b]]), tf.zeros([max_mel_length - n[b], m[b]], tf.float32)], axis=0),
                lambda: tf.reshape(sentence_log_alpha_tensor, [n[b], m[b]]))
            # ... and characters up to max_mix_length.
            extended_log_alpha_nm = tf.cond(max_mix_length > m[b],
                lambda: tf.concat([extended_log_alpha_n, tf.zeros([max_mel_length, max_mix_length - m[b]], tf.float32)], axis=1),
                lambda: extended_log_alpha_n)
            # Loss: negative log-alpha at the terminal (last frame, last char).
            return [b + 1, batch_log_alpha.write(b, extended_log_alpha_nm), losses.write(b, -extended_log_alpha_nm[n[b]-1,m[b]-1])]
        _, v_batch_log_alpha_out, v_losses_out = tf.while_loop(lambda v_b, v_batch_log_alpha, v_losses: tf.less(v_b, batch_size), batch_body, [v_b, v_batch_log_alpha, v_losses], maximum_iterations=batch_size, name="batch_loop")
        return tf.reshape(v_batch_log_alpha_out.stack(), [batch_size, max_mel_length, max_mix_length]), tf.reshape(v_losses_out.stack(), [batch_size, -1])
def calculate_durations(alpha, m, n):
    """Backtrack the forward table to extract per-character durations.

    Walks frames backward from (n[b]-1, m[b]-1). At each step the path either
    stays on the current character or, if the table value one character back
    is larger, moves back one character; each visited cell contributes a
    one-hot row. Summing the one-hot rows over frames yields how many frames
    each character was assigned.

    Args:
      alpha: float Tensor [B, max_mel_length, max_mix_length] of (log) forward
        values from calculate_alpha.
      m: int Tensor [B], actual character counts.
      n: int Tensor [B], actual frame counts.

    Returns:
      int32 Tensor [B, max_mix_length] of frame counts per character.
    """
    alpha_shape = get_shape_list(alpha, expected_rank=3)
    batch_size = alpha_shape[0]
    max_mel_length = alpha_shape[1]
    max_mix_length = alpha_shape[2]
    with tf.compat.v1.variable_scope("alpha_durations"):
        v_b = tf.constant(0)
        v_batch_durations = tf.TensorArray(size=batch_size, dtype=tf.int32, name="batch_durations")
        def batch_body(b, batch_durations):
            best = tf.TensorArray(size=n[b], dtype=tf.int32, name="best")
            # The path must end on the last real character at the last frame.
            position = m[b] - 1
            best_vector = tf.sparse.SparseTensor(indices=[[position]], values=[1], dense_shape=[max_mix_length])
            best = best.write(n[b] - 1, tf.sparse.to_dense(best_vector, default_value=0, validate_indices=True))
            v_t = n[b] - 2
            def mel_body(t, prev_position, best):
                # Stay at character 0 once reached; otherwise step back one
                # character when its alpha beats the current one.
                position = tf.case([(tf.equal(prev_position, 0), lambda: tf.constant(0)), (tf.greater(alpha[b, t, prev_position - 1], alpha[b, t, prev_position]), lambda: prev_position - 1)], default=lambda: prev_position)
                best_vector = tf.sparse.SparseTensor(indices=[[position]], values=[1], dense_shape=[max_mix_length])
                return [t - 1, position, best.write(t, tf.sparse.to_dense(best_vector, default_value=0, validate_indices=True))]
            _, _, best_out = tf.while_loop(lambda v_t, position, best: tf.greater(v_t, -1), mel_body, [v_t, position, best], name="mel_loop")
            # Sum the one-hot rows over frames: frames spent per character.
            d = tf.reduce_sum(best_out.stack(), axis=0, keepdims=False)
            return [b + 1, batch_durations.write(b, d)]
        _, v_batch_durations_out = tf.while_loop(lambda v_b, v_batch_durations: tf.less(v_b, batch_size), batch_body, [v_b, v_batch_durations], maximum_iterations=batch_size, name="batch_loop")
        return tf.reshape(v_batch_durations_out.stack(), [batch_size, max_mix_length])
def transformer_model(input_tensor,
                      attention_mask=None,
                      hidden_size=768,
                      num_hidden_layers=12,
                      num_attention_heads=12,
                      intermediate_size=768,
                      intermediate_act_fn=tf.nn.relu,
                      hidden_dropout_prob=0.1,
                      attention_probs_dropout_prob=0.1,
                      initializer_range=0.02,
                      do_return_all_layers=False):
    """Multi-headed, multi-layer Transformer from "Attention is All You Need".

    This is almost an exact implementation of the original Transformer encoder.
    NOTE: unlike upstream BERT, the feed-forward ("intermediate") sub-layer
    here uses two width-3 1-D convolutions (conv1d_layer) instead of a single
    dense layer — the FFT-block variant used by FastSpeech/AlignTTS.

    See the original paper:
    https://arxiv.org/abs/1706.03762

    Also see:
    https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py

    Args:
      input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
      attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,
        seq_length], with 1 for positions that can be attended to and 0 in
        positions that should not be.
      hidden_size: int. Hidden size of the Transformer.
      num_hidden_layers: int. Number of layers (blocks) in the Transformer.
      num_attention_heads: int. Number of attention heads in the Transformer.
      intermediate_size: int. The size of the "intermediate" (a.k.a., feed
        forward) layer.
      intermediate_act_fn: function. The non-linear activation function to apply
        to the output of the intermediate/feed-forward layer.
      hidden_dropout_prob: float. Dropout probability for the hidden layers.
      attention_probs_dropout_prob: float. Dropout probability of the attention
        probabilities.
      initializer_range: float. Range of the initializer (stddev of truncated
        normal).
      do_return_all_layers: Whether to also return all layers or just the final
        layer.

    Returns:
      float Tensor of shape [batch_size, seq_length, hidden_size], the final
      hidden layer of the Transformer.

    Raises:
      ValueError: A Tensor shape or parameter is invalid.
    """
    if hidden_size % num_attention_heads != 0:
        raise ValueError(
            "The hidden size (%d) is not a multiple of the number of attention "
            "heads (%d)" % (hidden_size, num_attention_heads))
    attention_head_size = int(hidden_size / num_attention_heads)
    input_shape = get_shape_list(input_tensor, expected_rank=3)
    batch_size = input_shape[0]
    seq_length = input_shape[1]
    input_width = input_shape[2]
    # The Transformer performs sum residuals on all layers so the input needs
    # to be the same as the hidden size.
    if input_width != hidden_size:
        raise ValueError("The width of the input tensor (%d) != hidden size (%d)" %
                         (input_width, hidden_size))
    # We keep the representation as a 2D tensor to avoid re-shaping it back and
    # forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on
    # the GPU/CPU but may not be free on the TPU, so we want to minimize them to
    # help the optimizer.
    prev_output = reshape_to_matrix(input_tensor)
    all_layer_outputs = []
    for layer_idx in range(num_hidden_layers):
        with tf.variable_scope("layer_%d" % layer_idx):
            layer_input = prev_output
            with tf.variable_scope("attention"):
                attention_heads = []
                with tf.variable_scope("self"):
                    attention_head = attention_layer(
                        from_tensor=layer_input,
                        to_tensor=layer_input,
                        attention_mask=attention_mask,
                        num_attention_heads=num_attention_heads,
                        size_per_head=attention_head_size,
                        attention_probs_dropout_prob=attention_probs_dropout_prob,
                        initializer_range=initializer_range,
                        do_return_2d_tensor=True,
                        batch_size=batch_size,
                        from_seq_length=seq_length,
                        to_seq_length=seq_length)
                    attention_heads.append(attention_head)
                attention_output = None
                if len(attention_heads) == 1:
                    attention_output = attention_heads[0]
                else:
                    # In the case where we have other sequences, we just concatenate
                    # them to the self-attention head before the projection.
                    attention_output = tf.concat(attention_heads, axis=-1)
                # Run a linear projection of `hidden_size` then add a residual
                # with `layer_input`.
                with tf.variable_scope("output"):
                    attention_output = tf.layers.dense(
                        attention_output,
                        hidden_size,
                        kernel_initializer=create_initializer(initializer_range))
                    attention_output = dropout(attention_output, hidden_dropout_prob)
                    attention_output = layer_norm(attention_output + layer_input)
            # The activation is only applied to the "intermediate" hidden layer.
            # conv1d needs the 3D [B, F, H] layout back.
            #B, F, H
            #ss2 = get_shape_list(attention_output, expected_rank=2)
            #print ("ss2: ", ss2[0], ss2[1])
            intermediate_input = tf.reshape(attention_output, [batch_size, seq_length, input_width])
            with tf.variable_scope("intermediate"):
                # Two stacked width-3 convolutions with dropout between them.
                # `context_layer` = [B, F, N*H]
                layer1_output = conv1d_layer(intermediate_input, 3, hidden_size, intermediate_size, "SAME",
                                             intermediate_act_fn, create_initializer(initializer_range), name="conv_1")
                layer1_with_dropout = dropout(layer1_output, hidden_dropout_prob)
                intermediate_output = conv1d_layer(layer1_with_dropout, 3, intermediate_size, hidden_size, "SAME",
                                                   intermediate_act_fn, create_initializer(initializer_range), name="conv_2")
                intermediate_with_dropout = dropout(intermediate_output, hidden_dropout_prob)
                # Flatten back to the 2D [B*F, H] working layout.
                intermediate_output = tf.reshape(intermediate_with_dropout , [-1, input_width])
            # Down-project back to `hidden_size` then add the residual.
            with tf.variable_scope("output"):
                layer_output = tf.layers.dense(
                    intermediate_output,
                    hidden_size,
                    kernel_initializer=create_initializer(initializer_range))
                layer_output = dropout(layer_output, hidden_dropout_prob)
                layer_output = layer_norm(layer_output + attention_output)
                prev_output = layer_output
                all_layer_outputs.append(layer_output)
    if do_return_all_layers:
        final_outputs = []
        for layer_output in all_layer_outputs:
            final_output = reshape_from_matrix(layer_output, input_shape)
            final_outputs.append(final_output)
        return final_outputs
    else:
        final_output = reshape_from_matrix(prev_output, input_shape)
        return final_output
class AlignttsModel(object):
# F = `from_tensor` sequence length in characters
# T = `to_tensor` sequence length in frames
# V - alphabet size
# E - embeddings, hidden size
# D - duration embeddings, hidden size
# M - number of mel buckets in final spectrogram
def __init__(self,
input_tensor,
input_lengths,
input_masks,
input_durations,
mel_tensor,
mel_lengths,
hidden_size=768,
num_hidden_layers=6,
num_attention_heads=2,
filter_width=3,
duration_predictor_hidden_layers=2,
duration_predictor_attention_heads=2,
duration_predictor_hidden_size=128,
num_mix_density_hidden_layers=4, #as in DEEP MIXTURE DENSITY NETWORKS GOOGLE Paper
mix_density_hidden_size=256,
alphabet_size=29,
initializer_range=0.02,
activation_fn=tf.nn.relu,
alpha=1.0,
dropout_prob=0.1,
use_durations=2, #use duration predictor by default
is_trainable=True):
input_shape = get_shape_list(input_tensor, expected_rank=2)
batch_size = input_shape[0]
max_input_length = input_shape[1]
mel_shape = get_shape_list(mel_tensor, expected_rank=3)
max_mel_length = mel_shape[1]
self._num_mels = mel_shape[2]
if is_trainable == False:
dropout_prob = 0.0
#1). embedding table: [V, E] so like this [alphabet_siz, hidden_size]
#lookup in embeddings table to find entry for each character
with tf.compat.v1.variable_scope("input_embeddings"):
#[A, E]
self._encoder_embedding_table = tf.compat.v1.get_variable(initializer=create_initializer(initializer_range),
shape=[alphabet_size, hidden_size], name='encoder_embedding_table')
encoder_embedding_expanded = tf.expand_dims(self._encoder_embedding_table, 0)
encoder_embedding_expanded = tf.tile(encoder_embedding_expanded, [batch_size, 1, 1])
#[B, F] --> [B, F, E]
self._encoder_embedding = tf.gather(encoder_embedding_expanded, input_tensor, axis=1, batch_dims=1, name="encoder_embedding")
#2) make positional encoding
#[B, F, E] --> [B, F, E]
with tf.compat.v1.variable_scope("input_positions"):
self._encoder_position_table = tf.compat.v1.get_variable(initializer=create_initializer(initializer_range),
shape=[max_input_length, hidden_size], name='encoder_position_table')
self._encoder_embedding_with_positions = self._encoder_embedding + tf.expand_dims(self._encoder_position_table, 0)
#3) create 3D mask from 2D to mask attention with shorter sentenses then max_input_length sentence
attention_mask = create_attention_mask_from_input_mask(
input_tensor, input_masks)
#4). encoder FFT block
with tf.compat.v1.variable_scope("encoder_ttf"):
encoder_dropout_prob = dropout_prob
self._encoder_tensor = transformer_model(self._encoder_embedding_with_positions,
attention_mask=attention_mask,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
intermediate_size=hidden_size,
intermediate_act_fn=activation_fn,
hidden_dropout_prob=encoder_dropout_prob,
attention_probs_dropout_prob=encoder_dropout_prob ,
initializer_range=0.02,
do_return_all_layers=False)
#5.1) Mix density network
with tf.compat.v1.variable_scope("mix_density_network"):
#[B, F, E] --> [B, F, 256]
self._mix_density_tensor = mix_units(self._encoder_tensor, num_mix_density_hidden_layers, mix_density_hidden_size, create_initializer(initializer_range), dropout_prob)
#[B, F, E] --> [B, F, M*2]
self._mu_and_variance = dense_layer(self._mix_density_tensor, self._num_mels*2, activation=tf.math.softplus, initializer=create_initializer(initializer_range), name="mu_and_variance")
#The last linear layer outputs the mean and variance vectorof multi-dimensional gaussian distributions, which representsthe mel-spectrum distribution of each character.
#The hiddensize of the linear layer in the mix network is set to 256 and thedimension of the output is 160 (80 dimensions for the meanand 80 dimensions for variance of the gaussian distribution).
self._log_alpha, self._per_example_alignment_loss = calculate_alpha(self._mu_and_variance[:, :, :self._num_mels], self._mu_and_variance[:, :, self._num_mels:], input_lengths, mel_tensor, mel_lengths)
self._mix_durations = calculate_durations(self._log_alpha, input_lengths, mel_lengths)
#5.2). Length regulator
#increase hidden lengths as per length predictor values and alpha speech speed coefficient
#5.2.1). embedding table: [V, D] so like this [alphabet_siz, hidden_size]
#lookup in embeddings table to find entry for each character
with tf.compat.v1.variable_scope("duration_embeddings"):
#[A, D]
duration_embedding_table = tf.compat.v1.get_variable(initializer=create_initializer(initializer_range),
shape=[alphabet_size, duration_predictor_hidden_size], name='duration_embedding_table')
duration_embedding_expanded = tf.expand_dims(duration_embedding_table, 0)
duration_embedding_expanded = tf.tile(duration_embedding_expanded, [batch_size, 1, 1])
#[B, F] --> [B, F, D]
duration_embedding = tf.gather(duration_embedding_expanded, input_tensor, axis=1, batch_dims=1, name="duration_embedding")
#5.2.2) make positional encoding
#[B, F, D] --> [B, F, D]
with tf.compat.v1.variable_scope("duration_positions"):
duration_position_table = tf.compat.v1.get_variable(initializer=create_initializer(initializer_range),
shape=[max_input_length, duration_predictor_hidden_size], name='duration_position_table')
duration_embedding_with_positions = duration_embedding + tf.expand_dims(duration_position_table, 0)
#5.2.3). duration FFT block
#[B, F, D] --> [B, F, D]
with tf.compat.v1.variable_scope("duration_ttf"):
duration_prdictor_dropout_prob = dropout_prob
duration_tensor = transformer_model(duration_embedding_with_positions,
attention_mask=attention_mask,
hidden_size=duration_predictor_hidden_size,
num_hidden_layers=duration_predictor_hidden_layers,
num_attention_heads=duration_predictor_attention_heads,
intermediate_size=duration_predictor_hidden_size,
intermediate_act_fn=activation_fn,
hidden_dropout_prob=duration_prdictor_dropout_prob,
attention_probs_dropout_prob=duration_prdictor_dropout_prob,
initializer_range=0.02,
do_return_all_layers=False)
#nominal_durations is an output to train duration predictor
#[B, F, D] --> [B, F, D]
nominal_durations = dense_layer(duration_tensor, 1, activation=activation_fn, initializer=create_initializer(initializer_range), name="nominal_durations")
#[B, F, D] --> [B, F]
self._nominal_durations = tf.squeeze(nominal_durations, axis=-1)
#scale back durations so the sum less or equal to max
mel_length = tf.reduce_sum(tf.cast(tf.math.multiply(self._nominal_durations + 0.5, alpha), tf.int32), axis=-1, keep_dims=True)
scaling_factor = tf.clip_by_value(tf.cast(mel_length, dtype=tf.float32) / tf.cast(max_mel_length, dtype=tf.float32), tf.constant(1, dtype=tf.float32), tf.cast(tf.reduce_max(mel_length), dtype=tf.float32))
scaled_durations = self._nominal_durations / scaling_factor
#[B, F] --> [B, F]
self._mel_durations = tf.cast(tf.math.multiply(scaled_durations + 0.5, alpha), tf.int32)
#Use duration from: 0 - input, 1 - mix network, 2 - duration predictor
durations = tf.case([(tf.equal(use_durations, 0), lambda: input_durations), (tf.equal(use_durations, 1), lambda: self._mix_durations)], default=lambda: self._mel_durations)
#32, 200 -> 32, 1
lengths = tf.fill([batch_size, 1], max_mel_length) - tf.reduce_sum(durations, axis=1, keep_dims=True)
#32, 200 -> 32, 201
durations_with_extra_lengths = tf.concat([durations, lengths], axis=1)
#32, 200, 768 -> 32, 201, 768
encoder_with_extra_zero = tf.concat([self._encoder_tensor, tf.zeros([batch_size, 1, hidden_size], tf.float32)], axis=1)
flatten_durations = tf.reshape(durations_with_extra_lengths, [-1])
flatten_encoder = tf.reshape(encoder_with_extra_zero, [-1, hidden_size])
#32*201, 768 -> 32*1024, 768
#OOM 3216,914,768
encoder_with_flatten_durations = tf.repeat(flatten_encoder, flatten_durations, axis=0, name="encoder_with_flatten_durations")
encoder_with_durations = tf.reshape(encoder_with_flatten_durations, [batch_size, max_mel_length, hidden_size], name="encoder_with_durations")
#6.1). Add positional encoding
#[B, T, E] --> [B, T, E]
with tf.compat.v1.variable_scope("mel_positions"):
self._decoder_position_table = tf.compat.v1.get_variable(initializer=create_initializer(initializer_range),
shape=[max_mel_length, hidden_size], name='decoder_position_table')
decoder_embedding_with_positions = encoder_with_durations + tf.expand_dims(self._decoder_position_table, 0)
#6.2). Decoder mask
#2 --> 32,2
mask_template = tf.tile(tf.constant([[1,0]], tf.int32), [batch_size, 1])
mel_length = tf.reduce_sum(durations, axis=1, keep_dims=True)
#32,2
mask_durations = tf.concat([mel_length, tf.fill([batch_size, 1], max_mel_length) - mel_length], axis=1)
#32,2 -> 32*2
flatten_mask_template = tf.reshape(mask_template, [-1])
#32,2 -> 32*2
flatten_mask_durations = tf.reshape(mask_durations, [-1])
#32*2 -> 32*1024
decoder_flatten_mask = tf.repeat(flatten_mask_template, flatten_mask_durations, axis=0, name="decoder_flatten_mask")
#32*1024 -> 32,1024
decoder_mask = tf.reshape(decoder_flatten_mask, [batch_size, max_mel_length], name="decoder_mask")
# create 3D mask from 2D to mask attention with shorter mel duration then max_mel_length
decoder_attention_mask = create_attention_mask_from_input_mask(
decoder_embedding_with_positions, decoder_mask)
#7). decoder FFT block
with tf.compat.v1.variable_scope("decoder_ttf"):
decoder_dropout_prob = dropout_prob
#attention_mask=decoder_mask,
self._decoder_tensor = transformer_model(decoder_embedding_with_positions,
attention_mask=decoder_attention_mask,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
intermediate_size=hidden_size,
intermediate_act_fn=activation_fn,
hidden_dropout_prob=decoder_dropout_prob,
attention_probs_dropout_prob=decoder_dropout_prob,
initializer_range=0.02,
do_return_all_layers=False)
#8). Linear layer, returns mel: [B, T, E] --> [B, T, M]
self._mel_spectrograms = dense_layer(self._decoder_tensor, self._num_mels, activation=None, initializer=create_initializer(initializer_range), name="mel_spectrograms")
    @property
    def per_example_alignment_loss(self):
        """Read-only accessor for `self._per_example_alignment_loss` (set during model construction)."""
        return self._per_example_alignment_loss
    @property
    def log_alpha(self):
        """Read-only accessor for `self._log_alpha` (set during model construction)."""
        return self._log_alpha
    @property
    def encoder_embedding(self):
        """Read-only accessor for the encoder input embedding tensor."""
        return self._encoder_embedding
    @property
    def encoder_tensor(self):
        """Encoder output tensor; presumably [B, F, hidden_size] — it is concatenated with a [B, 1, hidden_size] zero pad elsewhere in the graph."""
        return self._encoder_tensor
    @property
    def mix_density_tensor(self):
        """Read-only accessor for `self._mix_density_tensor` (set during model construction)."""
        return self._mix_density_tensor
    @property
    def mu_and_variance(self):
        """Concatenated [mu | variance] tensor; the last axis holds `_num_mels` mean channels followed by the variance channels (see `mu` / `var`)."""
        return self._mu_and_variance
@property
def mu(self):
return self._mu_and_variance[:, :, :self._num_mels]
@property
def var(self):
return self._mu_and_variance[:, :, self._num_mels:]
    @property
    def mix_durations(self):
        """Durations from the mix network; selected when `use_durations == 1` during graph construction."""
        return self._mix_durations
    @property
    def nominal_durations(self):
        """Duration-predictor output, shape [B, F] (last axis of the predictor's [B, F, 1] output squeezed away)."""
        return self._nominal_durations
    @property
    def mel_durations(self):
        """Integer per-token durations, shape [B, F]: nominal durations scaled by alpha, rounded, and capped so their sum fits max_mel_length."""
        return self._mel_durations
@property
def mel_spectrograms (self):
return self._mel_spectrograms
|
# -----------------------------------------------------------------------------
# Copyright * 2014, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The Crisis Mapping Toolkit (CMT) v1 platform is licensed under the Apache
# License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# -----------------------------------------------------------------------------
import sys
import os.path
import glob
import datetime
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy
'''
Draws a graph of the output from "lake_measure.py"
To use, pass in the output file from that tool.
'''
def parse_lake_results(name):
    """Parse an output file from "lake_measure.py".

    The file layout is: one throwaway line, a "name,country,area" line, one
    column-header line, then one "date,satellite,cloud,water" line per
    observation (date as YYYY-MM-DD, cloud and water as pixel counts).

    Observations with heavy cloud cover are discarded, pixel counts are
    converted to km^2, and isolated outliers are removed by comparing each
    value with its neighbors.

    @param name: path of the results file
    @return (features_dict, dates, water_areas_km2, cloud_areas_km2)
    """
    PIXEL_AREA = 0.03 * 0.03  # km^2 per pixel (30m x 30m Landsat pixels)
    x_axis = []
    y_axis = []
    cloud_axis = []
    with open(name, 'r') as f:
        f.readline()  # skip leading line
        parts = f.readline().split(',')
        names = parts[0]
        country = parts[1]
        area = parts[2]
        f.readline()  # skip column-header line
        for l in f:
            parts = l.split(',')
            date_parts = parts[0].split('-')
            date = datetime.date(int(date_parts[0]), int(date_parts[1]), int(date_parts[2]))
            cloud = int(parts[2])
            water = int(parts[3])
            # keep only observations with less than ~1 km^2 of cloud cover
            if cloud < (1 / PIXEL_AREA):
                x_axis.append(date)
                y_axis.append(water * PIXEL_AREA)
                cloud_axis.append(cloud * PIXEL_AREA)
    # Remove values that differ from most of their neighbors by a large amount.
    NEIGHBOR_RADIUS = 3
    OUTLIER_FACTOR = 0.98
    remove = []
    for i in range(len(y_axis)):
        start = max(0, i - NEIGHBOR_RADIUS)
        end = min(len(y_axis), i + NEIGHBOR_RADIUS)
        # All neighbors in the window except element i itself.
        # (The original sliced [start:i-1], silently dropping index i-1.)
        neighbors = y_axis[start:i] + y_axis[i + 1:end]
        num_outliers = 0
        for v in neighbors:
            # v is an "outlier" w.r.t. y_axis[i] if it differs by more than the factor
            if (v < y_axis[i] * OUTLIER_FACTOR) or (v > y_axis[i] / OUTLIER_FACTOR):
                num_outliers += 1
        # Drop i when it disagrees with at least half its neighbors (or has none).
        if (not neighbors) or (float(num_outliers) / len(neighbors) >= 0.5):
            remove.append(i)
    for i in reversed(remove):  # reversed so earlier indexes stay valid
        y_axis.pop(i)
        cloud_axis.pop(i)
        x_axis.pop(i)
    results = dict()
    results['name'   ] = names
    results['country'] = country
    results['area'   ] = area
    return (results, x_axis, y_axis, cloud_axis)
def plot_results(features, dates, water, clouds, save_directory=None, ground_truth_file=None):
    """Plot lake surface area over time, optionally overlaying measured lake levels.

    @param features: dict with at least a 'name' entry (from parse_lake_results)
    @param dates: list of datetime.date observation dates
    @param water: surface areas in km^2, parallel to dates
    @param clouds: cloud-cover areas in km^2, parallel to dates (currently unplotted)
    @param save_directory: if set, save the figure as <name>.pdf in this directory
    @param ground_truth_file: if set, overlay measured elevations on a twinned axis
    """
    fig, ax = plt.subplots()
    water_line = ax.plot(dates, water, linestyle='-', color='b', linewidth=1, label='Landsat-5 Surface Area')
    ax.plot(dates, water, 'gs', ms=3)  # green square markers on the same data
    ax.xaxis.set_major_locator(mdates.YearLocator())
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y'))
    ax.xaxis.set_minor_locator(mdates.MonthLocator())
    ax.set_xlabel('Time')
    ax.format_xdata = mdates.DateFormatter('%m/%d/%Y')
    # Collect legend handles here; the original unconditionally referenced
    # ground_truth_line and crashed with NameError when no ground truth was given.
    lns = water_line
    if ground_truth_file is not None:
        (ground_truth_dates, ground_truth_levels) = load_ground_truth(ground_truth_file)
        ax2 = ax.twinx()  # elevation uses its own y-axis on the right
        ground_truth_line = ax2.plot(ground_truth_dates, ground_truth_levels, linestyle='--', color='r', linewidth=2, label='Measured Elevation')
        ax2.set_ylabel('Lake Elevation (ft)')
        ax2.format_ydata = (lambda x : '%g ft' % (x))
        ax2.set_ylim([6372, 6385.5])  # NOTE(review): hard-coded range, looks Mono-Lake specific
        lns = lns + ground_truth_line
    ax.format_ydata = (lambda x : '%g km^2' % (x))
    ax.set_ylabel('Lake Surface Area (km^2)')
    fig.suptitle(features['name']+ ' Surface Area from Landsat')
    labs = [l.get_label() for l in lns]
    ax.legend(lns, labs, loc=4)
    ax.grid(True)
    fig.autofmt_xdate()
    if save_directory is not None:
        fig.savefig(os.path.join(save_directory, features['name'] + '.pdf'))
def load_ground_truth(filename):
    """Load a ground-truth lake level file.

    Each line has the form "<Mon>-<YY> <level>", e.g. "Jan-95 6372.5".
    Two-digit years above 50 are interpreted as 19xx, the rest as 20xx.

    @param filename: path of the ground-truth file
    @return (dates, levels): datetime.datetime list (first of each month) and float list
    """
    all_months = {'Jan' : 1, 'Feb' : 2, 'Mar' : 3, 'Apr' : 4, 'May' : 5, 'Jun' : 6,
                  'Jul' : 7, 'Aug' : 8, 'Sep' : 9, 'Oct' : 10, 'Nov' : 11, 'Dec' : 12}
    dates = []
    levels = []
    # The original left the file handle open; "with" guarantees it is closed.
    with open(filename, 'r') as f:
        for line in f:
            parts = line.split()
            month = all_months[parts[0].split('-')[0]]
            year = int(parts[0].split('-')[1])
            # Two-digit year pivot: 51-99 -> 1951-1999, 00-50 -> 2000-2050.
            if year > 50:
                year = 1900 + year
            else:
                year = 2000 + year
            dates.append(datetime.datetime(year, month, 1))
            levels.append(float(parts[1]))
    return (dates, levels)
# --- Main script ---
if len(sys.argv) > 1:
    # A results file was given on the command line: plot just that lake.
    (features, dates, water, clouds) = parse_lake_results(sys.argv[1])
    plot_results(features, dates, water, clouds)
    plt.show()
else:
    # No argument: batch-save graphs for every results file with enough data.
    for fname in glob.iglob(os.path.join('results', '*.txt')):
        try:
            (features, dates, water, clouds) = parse_lake_results(fname)
        except Exception:
            # Narrowed from a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit; print() form is Python-2/3 compatible.
            print('Error parsing %s.' % (fname))
            continue
        if len(dates) > 100:
            plot_results(features, dates, water, clouds, save_directory=os.path.join('results', 'graphs'))
|
import random
valid_inputs = ["rock", "paper", "scissor"]
def ask_the_user():
    """Prompt until the user types one of the valid moves, then return it."""
    while True:
        choice = input("Choose: rock, paper, scissor \n")
        if choice in valid_inputs:
            return choice
        # Invalid input: complain and prompt again.
        print("Sorry, I can't understand")
def ask_the_PC():
    """Return the computer's move: a uniformly random entry of valid_inputs."""
    return random.choice(valid_inputs)
def get_the_winner(user_response, pc_response):
    """Decide a round of rock-paper-scissor.

    Moves are compared via their index in valid_inputs
    (rock=0, paper=1, scissor=2); each move beats the one directly
    before it in that cycle.

    @return "TIE", "User wins!" or "PC wins!"
    """
    user_idx = valid_inputs.index(user_response)
    pc_idx = valid_inputs.index(pc_response)
    if user_idx == pc_idx:
        return "TIE"
    # The user wins exactly when their move is one step ahead in the cycle.
    if (user_idx - pc_idx) % 3 == 1:
        return "User wins!"
    return "PC wins!"
def main():
    """Play one round: ask both players, announce the PC's move and the result."""
    user_response = ask_the_user()
    pc_response = ask_the_PC()
    # Fixed user-facing grammar: "choosed" -> "chose".
    print(f"The PC chose {pc_response}")
    print(get_the_winner(user_response, pc_response))
# Play a single round when executed as a script (not on import).
if __name__ == "__main__":
    main()
|
'''
High-level editor interface that communicates with underlying editor (like
Espresso, Coda, etc.) or browser. Basically, you should call set_context(obj)
method to set up undelying editor context before using any other method.
This interface is used by zen_actions.py for performing different
actions like Expand abbreviation
@example
import zen_editor
zen_editor.set_context(obj);
//now you are ready to use editor object
zen_editor.get_selection_range();
@author Sergey Chikuyonok (serge.che@gmail.com)
@link http://chikuyonok.ru
Gedit implementation:
@author Franck Marcia (franck.marcia@gmail.com)
'''
import zen_core, zen_actions
import os, re, locale
import zen_dialog
class ZenEditor():
    """
    Gedit implementation of the Zen Coding editor interface.

    An instance is (re)bound to a gedit window via set_context() before each
    action; the remaining methods operate on that window's active view,
    document and text buffer. Text positions are exchanged as character
    offsets into the buffer.
    """
    def __init__(self):
        # Last abbreviations typed into the wrap/expand dialogs; passed back
        # to zen_dialog as the default value on the next invocation.
        self.last_wrap = ''
        self.last_expand = ''
        # Disable zen_core's caret placeholder by setting it to an empty string.
        zen_core.set_caret_placeholder('')
    def set_context(self, context):
        """
        Setup underlying editor context. You should call this method before
        using any Zen Coding action.
        @param context: context object
        """
        self.context = context # window
        self.buffer = self.context.get_active_view().get_buffer()
        self.view = context.get_active_view()
        self.document = context.get_active_document()
        # Derive zen_core's 'lang' (e.g. 'fr') and 'locale' (e.g. 'fr-FR')
        # variables from the system default locale.
        default_locale = locale.getdefaultlocale()[0]
        lang = re.sub(r'_[^_]+$', '', default_locale)
        if lang != default_locale:
            zen_core.set_variable('lang', lang)
            zen_core.set_variable('locale', default_locale.replace('_', '-'))
        else:
            zen_core.set_variable('lang', default_locale)
            zen_core.set_variable('locale', default_locale)
        self.encoding = self.document.get_encoding().get_charset()
        zen_core.set_variable('charset', self.encoding)
        # Mirror the view's indentation settings (spaces vs tabs).
        if self.view.get_insert_spaces_instead_of_tabs():
            zen_core.set_variable('indentation', " " * context.get_active_view().get_tab_width())
        else:
            zen_core.set_variable('indentation', "\t")
    def get_selection_range(self):
        """
        Returns character indexes of selected text
        @return: list of start and end indexes
        @example
        start, end = zen_editor.get_selection_range();
        print('%s, %s' % (start, end))
        """
        offset_start = self.get_insert_offset()
        offset_end = self.get_selection_bound_offset()
        # Normalize so the smaller offset is returned first.
        if offset_start < offset_end:
            return offset_start, offset_end
        return offset_end, offset_start
    def create_selection(self, offset_start, offset_end=None):
        """
        Creates selection from start to end character indexes. If end is
        omitted, this method should place caret and start index.
        @type start: int
        @type end: int
        @example
        zen_editor.create_selection(10, 40)
        # move caret to 15th character
        zen_editor.create_selection(15)
        """
        if offset_end is None:
            # No end offset: just move the caret, clearing any selection.
            iter_start = self.buffer.get_iter_at_offset(offset_start)
            self.buffer.place_cursor(iter_start)
        else:
            iter_start = self.buffer.get_iter_at_offset(offset_start)
            iter_end = self.buffer.get_iter_at_offset(offset_end)
            self.buffer.select_range(iter_start, iter_end)
    def get_current_line_range(self):
        """
        Returns current line's start and end indexes
        @return: list of start and end indexes
        @example
        start, end = zen_editor.get_current_line_range();
        print('%s, %s' % (start, end))
        """
        iter_current = self.get_insert_iter()
        offset_start = self.buffer.get_iter_at_line(iter_current.get_line()).get_offset()
        # -1: get_chars_in_line() counts the line's newline character,
        # which should not be part of the range.
        offset_end = offset_start + iter_current.get_chars_in_line() - 1
        return offset_start, offset_end
    def get_caret_pos(self):
        """ Returns current caret position """
        return self.get_insert_offset()
    def set_caret_pos(self, pos):
        """
        Sets the new caret position
        @type pos: int
        """
        self.buffer.place_cursor(self.buffer.get_iter_at_offset(pos))
    def get_current_line(self):
        """
        Returns content of current line
        @return: str
        """
        offset_start, offset_end = self.get_current_line_range()
        iter_start = self.buffer.get_iter_at_offset(offset_start)
        iter_end = self.buffer.get_iter_at_offset(offset_end)
        # Buffer text is raw bytes here; decode with the document's charset.
        return self.buffer.get_text(iter_start, iter_end).decode(self.encoding)
    def replace_content(self, value, offset_start=None, offset_end=None):
        """
        Replace editor's content or its part (from start to end index). If
        value contains caret_placeholder, the editor will put caret into
        this position. If you skip start and end arguments, the whole target's
        content will be replaced with value.
        If you pass start argument only, the value will be placed at start
        string index of current content.
        If you pass start and end arguments, the corresponding substring of
        current target's content will be replaced with value
        @param value: Content you want to paste
        @type value: str
        @param start: Start index of editor's content
        @type start: int
        @param end: End index of editor's content
        @type end: int
        """
        if offset_start is None and offset_end is None:
            # No range given: replace the whole buffer.
            iter_start = self.buffer.get_iter_at_offset(0)
            iter_end = self.get_end_iter()
        elif offset_end is None:
            # Start only: insert at that position (empty deletion range).
            iter_start = self.buffer.get_iter_at_offset(offset_start)
            iter_end = self.buffer.get_iter_at_offset(offset_start)
        else:
            iter_start = self.buffer.get_iter_at_offset(offset_start)
            iter_end = self.buffer.get_iter_at_offset(offset_end)
        self.buffer.delete(iter_start, iter_end)
        # Record the inserted span so start_edit() can later place the caret.
        self.insertion_start = self.get_insert_offset()
        # Re-indent the pasted value to match the current line's padding.
        padding = zen_actions.get_current_line_padding(self)
        self.buffer.insert_at_cursor(zen_core.pad_string(value, padding))
        self.insertion_end = self.get_insert_offset()
    def get_content(self):
        """
        Returns editor's content
        @return: str
        """
        iter_start = self.buffer.get_iter_at_offset(0)
        iter_end = self.get_end_iter()
        return self.buffer.get_text(iter_start, iter_end).decode(self.encoding)
    def get_syntax(self):
        """
        Returns current editor's syntax mode
        @return: str
        """
        lang = self.context.get_active_document().get_language()
        lang = lang and lang.get_name()
        # Map gedit language names to Zen Coding syntax ids; anything
        # unrecognized falls back to 'html'.
        if lang == 'CSS': lang = 'css'
        elif lang == 'XSLT': lang = 'xsl'
        elif lang == 'SASS': lang = 'sass'
        else: lang = 'html'
        return lang
    def get_profile_name(self):
        """
        Returns current output profile name (@see zen_coding#setup_profile)
        @return {String}
        """
        return 'xhtml'
    def get_insert_iter(self):
        """Return the text iter at the insert (caret) mark."""
        return self.buffer.get_iter_at_mark(self.buffer.get_insert())
    def get_insert_offset(self):
        """Return the caret position as a character offset."""
        return self.get_insert_iter().get_offset()
    def get_selection_bound_iter(self):
        """Return the text iter at the selection-bound mark (the non-caret end of the selection)."""
        return self.buffer.get_iter_at_mark(self.buffer.get_selection_bound())
    def get_selection_bound_offset(self):
        """Return the selection-bound position as a character offset."""
        return self.get_selection_bound_iter().get_offset()
    def get_end_iter(self):
        """Return the text iter at the very end of the buffer."""
        return self.buffer.get_iter_at_offset(self.buffer.get_char_count())
    def get_end_offset(self):
        """Return the buffer length as a character offset."""
        return self.get_end_iter().get_offset()
    def start_edit(self):
        """Move the caret to the first edit point of the last insertion."""
        # Bug when the cursor is at the very beginning.
        if self.insertion_start == 0:
            self.insertion_start = 1
        self.set_caret_pos(self.insertion_start)
        # Jump to the next edit point; if there is none inside the inserted
        # text, fall back to the end of the insertion.
        if not self.next_edit_point() or (self.get_insert_offset() > self.insertion_end):
            self.set_caret_pos(self.insertion_end)
    def show_caret(self):
        """Scroll the view so the caret is visible."""
        self.view.scroll_mark_onscreen(self.buffer.get_insert())
    def get_user_settings_error(self):
        """Return zen_core's recorded user-settings error, if any."""
        return zen_core.get_variable('user_settings_error')
    def expand_abbreviation(self, window):
        """Menu action: expand the abbreviation at the caret in `window`."""
        self.set_context(window)
        # Group the whole expansion into a single undo step.
        self.buffer.begin_user_action()
        result = zen_actions.expand_abbreviation(self)
        if result:
            self.start_edit()
        self.buffer.end_user_action()
    def save_selection(self):
        """Remember the current caret/selection offsets for restore_selection()."""
        self.save_offset_insert = self.get_insert_offset()
        self.save_offset_selection_bound = self.get_selection_bound_offset()
    def restore_selection(self):
        """Re-select the range previously stored by save_selection()."""
        iter_insert = self.buffer.get_iter_at_offset(self.save_offset_insert)
        iter_selection_bound = self.buffer.get_iter_at_offset(self.save_offset_selection_bound)
        self.buffer.select_range(iter_insert, iter_selection_bound)
    def do_expand_with_abbreviation(self, done, abbr):
        """
        Dialog callback: expand `abbr` at the caret.
        @param done: True if a previous expansion was already applied and must
        be undone first (presumably live preview from the dialog — verify)
        @return: True if the abbreviation produced content
        """
        self.buffer.begin_user_action()
        if done:
            self.buffer.undo()
            self.restore_selection()
        content = zen_core.expand_abbreviation(abbr, self.get_syntax(), self.get_profile_name())
        if content:
            self.replace_content(content, self.get_insert_offset())
        self.buffer.end_user_action()
        # Coerce to bool.
        return not not content
    def expand_with_abbreviation(self, window):
        """Menu action: prompt for an abbreviation and expand it at the caret."""
        self.set_context(window)
        self.save_selection()
        done, self.last_expand = zen_dialog.main(self, window, self.do_expand_with_abbreviation, self.last_expand)
        if done:
            self.start_edit()
    def do_wrap_with_abbreviation(self, done, abbr):
        """
        Dialog callback: wrap the current selection with `abbr`.
        @param done: True if a previous wrap was already applied and must be
        undone first
        @return: result of zen_actions.wrap_with_abbreviation
        """
        self.buffer.begin_user_action()
        if done:
            self.buffer.undo()
            self.restore_selection()
        result = zen_actions.wrap_with_abbreviation(self, abbr)
        self.buffer.end_user_action()
        return result
    def wrap_with_abbreviation(self, window):
        """Menu action: prompt for an abbreviation and wrap the selection with it."""
        self.set_context(window)
        self.save_selection()
        done, self.last_wrap = zen_dialog.main(self, window, self.do_wrap_with_abbreviation, self.last_wrap)
        if done:
            self.start_edit()
    def match_pair_inward(self, window):
        """Menu action: shrink the selection to the next inner tag pair."""
        self.set_context(window)
        zen_actions.match_pair_inward(self)
    def match_pair_outward(self, window):
        """Menu action: grow the selection to the next outer tag pair."""
        self.set_context(window)
        zen_actions.match_pair_outward(self)
    def merge_lines(self, window):
        """Menu action: merge the selected lines as a single undo step."""
        self.set_context(window)
        self.buffer.begin_user_action()
        result = zen_actions.merge_lines(self)
        self.buffer.end_user_action()
        return result
    def prev_edit_point(self, window=None):
        """Move the caret to the previous edit point; scrolls it into view."""
        if window:
            self.set_context(window)
        result = zen_actions.prev_edit_point(self)
        self.show_caret()
        return result
    def next_edit_point(self, window=None):
        """Move the caret to the next edit point; scrolls it into view."""
        if window:
            self.set_context(window)
        result = zen_actions.next_edit_point(self)
        self.show_caret()
        return result
    def remove_tag(self, window):
        """Menu action: remove the tag under the caret as a single undo step."""
        self.set_context(window)
        self.buffer.begin_user_action()
        result = zen_actions.remove_tag(self)
        self.buffer.end_user_action()
        return result
    def split_join_tag(self, window):
        """Menu action: split or join the tag under the caret as a single undo step."""
        self.set_context(window)
        self.buffer.begin_user_action()
        result = zen_actions.split_join_tag(self)
        self.buffer.end_user_action()
        return result
    def toggle_comment(self, window):
        """Menu action: toggle comment on the current selection/line as a single undo step."""
        self.set_context(window)
        self.buffer.begin_user_action()
        result = zen_actions.toggle_comment(self)
        self.buffer.end_user_action()
        return result
|
'''
Hello world from tensorflow
'''
import tensorflow as tf
# Build a constant op, then evaluate it inside a (TF1-style) session.
greeting = tf.constant('Hello World Tensorflow!')
with tf.Session() as session:
    print(session.run(greeting))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.