from datetime import datetime
from flask import Flask, request
from flask_restful import Resource, Api
from sqlalchemy import create_engine
from flask_jsonpify import jsonify
from flask_cors import CORS
db_connect = create_engine('sqlite:///database/enterprise.db')
app = Flask(__name__)
api = Api(app)
cor_app = CORS(app)
class Employee (Resource):
@app.route('/employees', methods=['GET'])
def get_all_employees():
conn = db_connect.connect() # connect to database
query = conn.execute("select * from employees") # This line performs query and returns json result
return {'employees': [i[0] for i in query.cursor.fetchall()]} # Fetches first column that is Employee ID
@app.route('/employees/<employee_id>', methods=['GET'])
def get_employee(employee_id):
try:
eid = int(employee_id)
except Exception as e:
return {"error": "Invalid employee ID: {}".format(e)}
conn = db_connect.connect()
query = conn.execute("select * from employees where EmployeeId =%d " % eid)
result = {'data': [dict(zip(tuple(query.keys()), i)) for i in query.cursor]}
return jsonify(result)
@app.route('/employees/create', methods=['POST'])
def create_employee():
column_names = {
"first_name": "FirstName",
"last_name": "LastName",
"address": "Address",
"birth_date": "BirthDate",
"city": "City",
"country": "Country",
"email": "Email",
"fax": "Fax",
"hire_date": "HireDate",
"phone": "Phone",
"postal_code": "PostalCode",
"reports_to": "ReportsTo",
"state": "State",
"title": "Title"
}
first_name = request.args.get('first_name')
last_name = request.args.get('last_name')
if first_name is None or last_name is None:
return {"error": "Field names are required"}
if len(first_name) == 0 or len(last_name) == 0:
return {"error": "Field names are empty"}
columns = ",".join(column_names.get(column) for column in request.args)
values = "'{}', '{}'".format(first_name, last_name)
try:
for column in request.args:
if column != "first_name" and column != "last_name":
value = request.args[column]
if column == "hire_date" or column == "birth_date":
values = values + ",'{}'".format(datetime.strptime(value, "%Y-%m-%d"))
elif column == "reports_to":
values = values + ",{}".format(int(value))
else:
values = values + ",'{}'".format(value)
except Exception as e:
return {"error": "Verify your parameters: {}".format(e)}
conn = db_connect.connect()
print(columns, values)
query = conn.execute("INSERT INTO employees (" + columns + ") VALUES ( " + values + " )")
return {"success": "Employee created, number of rows {}".format(query.rowcount)}
@app.route('/employees/delete', methods=['POST'])
def delete_employee():
employee_id = request.args.get('employee_id')
if employee_id is None:
return {"error": "Employee ID not defined"}
try:
employee_id = int(employee_id)
except Exception as e:
return {"error": "Invalid employee ID: {}".format(e)}
conn = db_connect.connect()
query = "DELETE FROM employees where EmployeeId =%d " % employee_id
query = conn.execute(query)
if query.rowcount == 0:
return {"skipped": "No employee was deleted"}
return {"success": "Number of rows deleted {}".format(query.rowcount)}
@app.route('/employees/delete/last', methods=['POST'])
def delete_last_employee():
conn = db_connect.connect()
query = conn.execute("DELETE FROM employees where EmployeeId = (SELECT MAX(EmployeeId) FROM employees)")
if query.rowcount == 0:
return {"skipped": "No employee was deleted"}
return {"success": "Number of rows deleted {}".format(query.rowcount)}
api.add_resource(Employee) # Route_1
if __name__ == '__main__':
app.run(port='5002')
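# Example calls against the routes defined above (illustrative only; assumes the server is
# running locally on the port passed to app.run, and uses the `requests` library):
# import requests
# requests.get("http://localhost:5002/employees")
# requests.get("http://localhost:5002/employees/3")
# requests.post("http://localhost:5002/employees/create", params={"first_name": "Ada", "last_name": "Lovelace"})
# requests.post("http://localhost:5002/employees/delete", params={"employee_id": 3})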
|
# test try-else-finally statement
passed = 0
# base case
try:
print(1)
except:
passed = 0
print(2)
else:
print(3)
finally:
passed = 1
print(4)
# basic case that should skip else
try:
print(1)
raise Exception
passed = 0
except:
print(2)
else:
passed = 0
print(3)
finally:
print(4)
# uncaught exception should skip else
try:
try:
print(1)
raise ValueError
passed = 0
except TypeError:
print(2)
else:
passed = 0
print(3)
finally:
print(4)
except:
print('caught')
# nested within outer try
try:
print(1)
try:
print(2)
raise Exception
passed = 0
except:
print(3)
else:
passed = 0
print(4)
finally:
print(5)
except:
passed = 0
print(6)
else:
print(7)
finally:
print(8)
# nested within outer except, one else should be skipped
try:
print(1)
raise Exception
passed = 0
except:
print(2)
try:
print(3)
except:
passed = 0
print(4)
else:
print(5)
finally:
print(6)
else:
passed = 0
print(7)
finally:
print(8)
# nested within outer except, both else should be skipped
try:
print(1)
raise Exception
passed = 0
except:
print(2)
try:
print(3)
raise Exception
passed = 0
except:
print(4)
else:
passed = 0
print(5)
finally:
print(6)
else:
passed = 0
print(7)
finally:
print(8)
if (passed):
print("PASS")
else:
print("FAIL")
|
from typing import List
import gym
import numpy as np
from gym import spaces
class SwitchingWrapper(gym.Wrapper):
def __init__(self, env: gym.Env, env_index: int):
super().__init__(env)
self.env_index = env_index
def reset(self, **kwargs):
return self.env.reset(**kwargs)
def step(self, action):
observation, reward, done, info = self.env.step(action)
return (
observation,
reward,
done,
{**info, **{"env_index": self.env_index}},
)
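# Minimal usage sketch (assumes a registered Gym environment id such as "CartPole-v1"):
# env = SwitchingWrapper(gym.make("CartPole-v1"), env_index=0)
# obs = env.reset()
# obs, reward, done, info = env.step(env.action_space.sample())
# info["env_index"]  # -> 0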
|
class InsufficientConfiguration(ValueError):
"""The action can't be performed due to missing configuration."""
class WalletFileLocked(ValueError):
"""The wallet file is locked by another process."""
class ConfigurationError(ValueError):
"""A configuration parameter is incorrect."""
def __init__(self, field, error):
self.field = field
self.error = error
def __str__(self):
return "Error in '{}': {}".format(self.field, self.error)
|
import abc
import numpy as np
import mxnet as mx
from mxnet import nd
from mxnet import gluon
from typing import Union
from .distributions import BaseDistribution
from common import util
ZERO = nd.array([0.])
ONE = nd.array([1.])
class BaseBernoulli(BaseDistribution, metaclass=abc.ABCMeta):
@property
def is_reparam(self):
return False
mean = None
logits = None
def sample(self, n_samples: int = 1) -> nd.NDArray:
mean = self.get_param_not_repeated('mean')
if n_samples == 1:
return nd.sample_uniform(ZERO, ONE, shape=mean.shape) < mean
else:
shape = (n_samples,) + mean.shape
return nd.sample_uniform(ZERO, ONE, shape=shape)[0, :] < mean
def log_prob(self, x: nd.NDArray) -> nd.NDArray:
logits = self.get_param_maybe_repeated('logits')
if x.ndim > logits.ndim:
logits = nd.expand_dims(logits, 0)
return x * logits - util.softplus(logits)
class Bernoulli(BaseBernoulli):
def __init__(self, logits: nd.NDArray) -> None:
super(Bernoulli, self).__init__()
self.logits = logits
@property
def mean(self):
return util.sigmoid(self.logits)
class FastBernoulli(BaseBernoulli):
"""Fast parameterization of Bernoulli as in the survival filter paper.
Complexity O(CK) + O(CK) reduced to O(CK) + O(sK), where s is the number of nonzero entries.
References:
http://auai.org/uai2015/proceedings/papers/246.pdf
"""
def __init__(self,
positive_latent: nd.NDArray,
weight: nd.NDArray,
bias: nd.NDArray) -> None:
"""Number of classes is C; latent dimension K.
Args:
positive_latent: shape [batch_size, K] positive latent variable
weight: shape [K, C] real-valued weight
bias: real-valued bias (exp(bias) is added to the per-class rate)
"""
super(FastBernoulli, self).__init__()
# mean_arg is of shape [batch_size, C]
self._positive_latent = positive_latent
self._weight = weight
self._bias = bias
self.logits = None
@property
def mean(self):
arg = nd.dot(self._positive_latent, nd.exp(
self._weight)) + nd.exp(self._bias)
return 1. - nd.exp(-arg)
def log_prob(self, nonzero_index):
raise NotImplementedError("Not implemented!")
def log_prob_sum(self, nonzero_index: nd.NDArray) -> nd.NDArray:
"""Returns log prob. Argument is batch of indices of nonzero classes.
log p(x) = term_1 + term_2
term_1 = sum_c log p(x_c = 0)
term_2 = sum_{c: x_c = 1} log p(x_c = 1) - log p(x_c = 0)
term_1 takes O(CK) to calculate.
term_2 takes O(CK) + O(sK) with s being the number of nonzero entries in x
"""
mean_arg = -(nd.dot(self._positive_latent, nd.exp(self._weight))
+ nd.exp(self._bias))
assert mean_arg.shape[1] == 1, "Fast Bernoulli only supports batch size 1!"
mean_arg = mean_arg[:, 0, :]
term_1 = nd.sum(mean_arg, -1)
n_factors, n_classes = self._weight.shape
# weight_nonzero = nd.Embedding(
# nonzero_index, self._weight.T, n_classes, n_factors).T
# nonzero_arg = -nd.dot(self._positive_latent, nd.exp(weight_nonzero))
# raise NotImplementedError('need to add bias lookup!')
batch_size = mean_arg.shape[0]
nonzero_arg = nd.Embedding(
nonzero_index, mean_arg.T, n_classes, batch_size).T
term_2 = nd.sum(nd.log(1. - nd.exp(nonzero_arg)) - nonzero_arg, -1)
res = term_1 + term_2
return nd.expand_dims(res, 1)
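# Usage sketch for the plain Bernoulli above (shapes are illustrative; relies on the
# BaseDistribution helpers get_param_not_repeated/get_param_maybe_repeated from this repo):
# dist = Bernoulli(logits=nd.zeros((4, 10)))
# draws = dist.sample()          # boolean array of shape (4, 10), p = 0.5 everywhere
# log_p = dist.log_prob(draws)   # elementwise Bernoulli log-likelihood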
|
import DonkeySimple.DS as ds
import os, json, imp, getpass
from DonkeySimple.DS.ds_creator import create_ds
from DonkeySimple.DS import KnownError
def get_status(path):
"""
Prints the current state of the site.
"""
_chdir(path)
def print_list(title, values, indent = 2):
print '%s%s:' % (' '*indent, title)
new_indent = indent + 4
for v in values:
print ' '*new_indent, v
def print_con(title, con, indent = 2):
print_list(title, [cf.display for cf in con.cfiles.values()], indent)
import settings
print ' ================'
print ' %s Status' % settings.SITE_NAME
print ' ================'
repros = [r for r, _ in ds.get_all_repos()]
print_list('Repos', repros)
print_con('Pages', ds.con.Pages())
print_con('Templates', ds.con.Templates())
print_con('Static Files', ds.con.Statics())
if os.path.exists(ds.USERS_FILE):
users = _get_users()
user_info = []
for user, info in users.items():
admin = ('', ' (admin)')[info['admin']]
user_info.append('%s%s, last seen: %s' % (user, admin, info['last_seen']))
print_list('Web Interface Users', user_info)
def generate_site(path):
_chdir(path)
print ' ==============='
print ' GENERATING SITE'
print ' ==============='
sg = ds.SiteGenerator()
sg.generate_entire_site()
print ''
print ' Site Generated Successfully'
print ' ---------------------------'
def runserver(path):
_chdir(path)
import DonkeySimple.WebInterface as wi
wi.run_dev_server()
def edituser(path):
_chdir(path)
import DonkeySimple.WebInterface as wi
users = _get_users()
user_names = users.keys()
print 'Users:'
for i, u in enumerate(user_names):
print ' %d: %s' % (i, u)
username = user_names[int(raw_input('Choose user id: '))]
print 'User: %s' % username
user = users[username]
for k,v in user.items():
print ' %s: %r' % (k, v)
print 'Actions:'
actions = ['enter new password',
'reset password and print',
'reset password and email',
'cancel']
for i, a in enumerate(actions):
print ' %d: %s' % (i, a)
action = actions[int(raw_input('Choose Action: '))]
print 'Action: %s' % action
if action == 'enter new password':
pw1 = getpass.getpass('Enter new password: ')
pw2 = getpass.getpass('Repeat: ')
if pw1 != pw2:
raise KnownError('Passwords do not match')
pw = pw1
if len(pw) < ds.MIN_PASSWORD_LENGTH:
raise KnownError('Password must be at least %d characters in length' % ds.MIN_PASSWORD_LENGTH)
auth = wi.UserAuth()
user = auth.pop_user(username)
auth.add_user(username, user, pw)
print 'Password Changed'
elif action in ['reset password and print', 'reset password and email']:
auth = wi.UserAuth()
user = auth.pop_user(username)
pw = auth.new_random_password()
if action == 'reset password and print':
print 'new password: %s' % pw
auth.add_user(username, user, pw)
else:
email = user['email']
if '@' in email:
from DonkeySimple.DS.send_emails import password_email
success, msg = password_email(email, 'the site', username, pw)
if success:
auth.add_user(username, user, pw)
print 'Password email sent'
else:
raise KnownError('Error sending email, not changing password')
def _get_users():
with open(ds.USERS_FILE, 'r') as handle:
users = json.load(handle)
return users
def _chdir(path):
def find_path():
look_for = 'settings.py'
if path is None:
search_dirs = ('edit', '.', '..', '../..', '../../..')
for d in search_dirs:
if os.path.exists(os.path.join(d,look_for)):
return d
else:
if not os.path.exists(os.path.join(path, look_for)):
raise KnownError(
'Path supplied "%s" does not appear to be the "edit" folder of a donkey simple site tree.'\
% path)
return path
raise KnownError("No path supplied and you don't appear to be in a site tree now.")
new_path = find_path()
if new_path != '.':
print 'changing working directory to "%s"' % new_path
os.chdir(new_path)
return new_path
|
import numpy as np
import six
from scipy import optimize
import chainer
from chainer import cuda, Function, gradient_check, Variable
from chainer import optimizers, serializers, utils
from chainer import Link, Chain, ChainList
import chainer.functions as F
import chainer.links as L
def train(x_train, t_train, epoch, model, optimizer, batchsize=5000, xp=np):
N = len(x_train)
t_train[t_train == 0] = -1
for ep in six.moves.range(1, epoch + 1):
# training
perm = np.random.permutation(N)
sum_accuracy = 0
sum_loss = 0
for i in six.moves.range(0, N, batchsize):
model.zerograds()
x = Variable(xp.array(x_train[perm[i:i + batchsize]], xp.float32))
t = Variable(xp.array([t_train[perm[i:i + batchsize]]], xp.float32).T)
# Pass the loss function (Classifier defines it) and its arguments
g = model(x)
loss = F.mean(F.log(1+F.exp(-t*g)))
loss.backward()
optimizer.update()
return model, optimizer
def train_pu(x_train, t_train, x_test, t_test, pi, epoch, model, optimizer, batchsize=5000, xp=np):
N = len(x_train)
loss_list = []
acc1_list = []
acc2_list = []
pre1_list = []
rec1_list = []
pre2_list = []
rec2_list = []
for ep in six.moves.range(1, epoch + 1):
# training
perm = np.random.permutation(N)
loss_step = 0
count = 0
for i in six.moves.range(0, N, batchsize):
model.zerograds()
x = Variable(xp.array(x_train[perm[i:i + batchsize]], xp.float32))
t_temp = t_train[perm[i:i + batchsize]]
t_temp = xp.array([t_temp], xp.float32).T
t = Variable(t_temp)
#np = xp.sum(t)
#nu = batchsize - n1
# Pass the loss function (Classifier defines it) and its arguments
g = model(x)
positive, unlabeled = t_temp == 1, t_temp == 0
n_p = max([1, xp.sum(positive)])
n_u = max([1, xp.sum(unlabeled)])
gp = F.log(1+F.exp(-g))
gu = F.log(1+F.exp(g))
lossp = pi*F.sum(gp*positive)/n_p
lossn = F.sum(gu*unlabeled)/n_u - pi*F.sum(gu*positive)/n_p
if lossn.data < 0:
loss = -lossn
else:
loss = lossp + lossn
loss.backward()
optimizer.update()
loss_step += loss.data
count += 1
loss_step /= count
loss_list.append(loss_step)
acc1,pre1,rec1 = test(x_test, t_test, model, quant=False, xp=xp, batchsize=batchsize)
acc2,pre2,rec2 = test(x_test, t_test, model, quant=True, pi=pi, xp=xp, batchsize=batchsize)
acc1_list.append(acc1)
acc2_list.append(acc2)
pre1_list.append(pre1)
rec1_list.append(rec1)
pre2_list.append(pre2)
rec2_list.append(rec2)
print("epoch", ep)
print(acc1)
print(acc2)
print(pre1)
print(rec1)
print(pre2)
print(rec2)
loss_list = np.array(loss_list)
acc1_list = np.array(acc1_list)
acc2_list = np.array(acc2_list)
pre1_list = np.array(pre1_list)
rec1_list = np.array(rec1_list)
pre2_list = np.array(pre2_list)
rec2_list = np.array(rec2_list)
return model, optimizer, loss_list, acc1_list, acc2_list, pre1_list, rec1_list, pre2_list, rec2_list
def test(x, t, model, quant=True, pi=False, xp=np, batchsize=100):
theta = 0
f = np.array([])
for i in six.moves.range(0, len(x), batchsize):
X = Variable(xp.array(x[i:i + batchsize], xp.float32))
p = chainer.cuda.to_cpu(model(X).data).T[0]
f = np.append(f, p, axis=0)
if quant is True:
temp = np.copy(f)
temp = np.sort(temp)
theta = temp[int(np.floor(len(x)*(1-pi)))]
pred = np.zeros(len(x))
pred[f > theta] = 1
acc = np.mean(pred == t)
pre = np.sum((pred == t)[pred==1])/np.sum(pred==1)
rec = np.sum((pred == t)[t==1])/np.sum(t==1)
return acc, pre, rec
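# Note on the loss in train_pu above: it mirrors a non-negative positive-unlabeled (PU)
# risk estimator. With the logistic loss l(z) = log(1 + exp(-z)), the risk is estimated as
#     pi * E_p[l(g(x))] + max(0, E_u[l(-g(x))] - pi * E_p[l(-g(x))]),
# and when the bracketed unlabeled term goes negative the code backpropagates its negation
# instead of the full loss, which keeps the empirical risk from going negative.
# (Descriptive comment; pi is the class prior passed into train_pu.)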
|
#
# Copyright (c) 2015-2016, Yanzi Networks AB.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holders nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDERS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# A ws demo plugin module
class DemoPlugin:
def get_commands(this, wsdemo):
return [];
def handle_cmd(this, wsdemo, cmd):
print "Default handler called - will do nothing: ", cmd
|
import os
import types
import numpy as np
import torch
class BaseParams(object):
def __init__(self):
self.set_params()
self.compute_helper_params()
def set_params(self):
self.standardize_data = False
self.model_type = None
self.log_to_file = True
self.dtype = torch.float
self.eps = 1e-12
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.rand_seed = 123456789
self.rand_state = np.random.RandomState(self.rand_seed)
self.workspace_dir = os.path.expanduser("~")+"/Work/"
self.data_dir = self.workspace_dir+"/Datasets/"
self.out_dir = self.workspace_dir+"/Projects/"
def compute_helper_params(self):
pass
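# Sketch of a subclass (the fields below are hypothetical), showing the intended override
# pattern: set_params defines raw values, compute_helper_params derives values from them.
# class MLPParams(BaseParams):
#     def set_params(self):
#         super().set_params()
#         self.model_type = "mlp"
#         self.num_hidden = 256
#     def compute_helper_params(self):
#         self.half_hidden = self.num_hidden // 2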
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 14 22:09:55 2017
@author: mussie
"""
from pandas_datareader import data
import matplotlib.pyplot as plt
import pandas as pd
def main():
tickers = ['AAPL','MSFT','^GSPC']
data_source = 'yahoo'
start_date = '2000-01-01'
end_date = '2016-12-31'
panel_data = data.DataReader(tickers, data_source, start_date, end_date)
# Get the adjusted closing price
adj_close = panel_data.ix['Adj Close']
# Get all weekdays between 09-01-2001 and 09-30-2001
all_weekdays = pd.date_range(start=start_date,end=end_date,freq='B')
#Align the existing prices in adj_close with the new set of dates
adj_close = adj_close.reindex(all_weekdays)
# Fill missing values by replacing them with the latest available price for each instrument
adj_close = adj_close.fillna(method='ffill')
#Get MSFT time series
msft = adj_close.ix[:,'MSFT']
#Calculate the 30 and 175 day moving averages of the closing price
#short_rolling_msft = msft.rolling(window=30).mean()
#long_rolling_msft = msft.rolling(window=175).mean()
mat_plot_adj_close(msft)
#Plotting
def plot_adj_close(df):
short_rolling_msft = df.rolling(window=30).mean()
long_rolling_msft = df.rolling(window=175).mean()
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(df.index,df,label='MSFT')
ax.plot(short_rolling_msft.index,short_rolling_msft,label='30 days rolling')
ax.plot(long_rolling_msft.index,long_rolling_msft,label='175 days rolling')
ax.set_xlabel('Date')
ax.set_ylabel('Adjusted closing price($)')
ax.legend()
def mat_plot_adj_close(df):
short_rolling_msft = df.rolling(window=30).mean()
long_rolling_msft = df.rolling(window=175).mean()
plt.plot(df.index,df,label='MSFT')
plt.plot(short_rolling_msft.index,short_rolling_msft,label='30 days rolling')
plt.plot(long_rolling_msft.index,long_rolling_msft,label='175 days rolling')
plt.show()
if __name__ == '__main__': main()
|
from openrobotics.robomath import *
class StewartRobot(object):
def __init__(self, param: list) -> None:
"""
Construct a Stewart platform robot.
Parameters
----------
param: list
[ base platform radius, moving platform radius, base hinge offset angle, moving hinge offset angle ]
"""
super().__init__()
assert len(param) == 4, "the parameter list must contain exactly 4 values"
self.set_param(param)
def set_param(self, param: list):
self._param = np.array(param)
# compute the hinge positions on the static (base) platform
self.ab = np.zeros((3, 6)) # ab
daf = [0-param[2], 0+param[2],
120-param[2], 120+param[2],
240-param[2], 240+param[2]]
ab_i = np.array([[param[0], 0, 0]]).T
for i in range(6):
self.ab[:, i] = np.dot(euler_to_rot([0,0,daf[i]]), ab_i).reshape(3)
# compute the relative hinge positions on the moving platform
self.cd_c = np.zeros((3, 6))
dam = [0-param[3], 0+param[3],
120-param[3], 120+param[3],
240-param[3], 240+param[3]]
cd_i = np.array([[param[1], 0, 0]]).T
for i in range(6):
self.cd_c[:, i] = np.dot(euler_to_rot([0,0,dam[i]]), cd_i).reshape(3)
def calc_forward_kinematics(self, joint_pos:list):
# Newton-Raphson iteration tolerances
tol_fun = 1e-3
tol_ep = 1e-3
# maximum number of iterations
max_iter = 10
num_iter = 0
jp = joint_pos
ab = self.ab
cd_c = self.cd_c
ep = np.array([0,0,0,0,0,0])
while num_iter < max_iter:
R = euler_to_rot(ep[3:6])
euler = ep[3:6]*TO_RAD
ac = np.array([ep[0:3]]).T
bc = ac-ab
cd = np.zeros((3,6))
for i in range(6):
cd[:,i] = np.dot(R,cd_c[:,i])
bd = bc+cd
jp2_t = np.sum(np.square(bd),0)
fun = -(jp2_t-np.square(jp))
sum_fun = np.sum(np.abs(fun))
if sum_fun < tol_fun:
break
df_dep = np.zeros((6,6))
df_dep[:,0:3] = 2*bd.T
for i in range(6):
df_dep[i, 5] = 2*(-bc[0,i]*cd[1,i] + bc[1,i]*cd[0,i]) #dfda4
df_dep[i, 4] = 2*((-bc[0,i]*math.cos(euler[2]) + bc[1,i]*math.sin(euler[2]))*cd[2,i] \
- (cd_c[0,i]*math.cos(euler[1]) + cd_c[1,i]*math.sin(euler[1])*math.sin(euler[0]))*bc[2,i]) #dfda5
df_dep[i, 3] = 2*cd_c[1,i]*(np.dot(bc[:,i],R[:,2])) #dfda
delta_ep = np.linalg.solve(df_dep,fun)
delta_ep[3:6] = delta_ep[3:6]*TO_DEG
if abs(np.sum(delta_ep)) < tol_ep:
break
epu = ep+delta_ep
ep = epu
num_iter = num_iter+1
## store the structural points for plotting
self.bd = bd
return ep
def calc_inverse_kinematics(self, end_pos:list):
ab = self.ab
cd_c = self.cd_c
ep = end_pos
R = euler_to_rot(ep[3:6])
ac = ep[0:3]
cd = np.zeros((3,6))
for i in range(6):
cd[:,i] = np.dot(R,cd_c[:,i])
ad = ac+cd
bd = ad-ab
jl = np.sum(np.square(bd),0)**0.5
self.bd = bd
return jl
if __name__ == "__main__":
stewart = StewartRobot([500.0, 200.0, 5.0, 55.0])
end_pos = stewart.calc_forward_kinematics([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
joint_pos = stewart.calc_inverse_kinematics(end_pos)
print(end_pos)
print(joint_pos)
|
import pytest
from rest_framework.authentication import exceptions
from supportal.app.authentication_backend import CognitoJWTAuthentication, validate_jwt
from supportal.app.models import APIKey
from supportal.tests import utils
CLIENT_ID = "1234abcdef"
@pytest.fixture
def api_key(superuser):
return APIKey.objects.create(client_id=CLIENT_ID, user=superuser)
@pytest.fixture
def backend():
return CognitoJWTAuthentication()
@pytest.mark.django_db
def test_access_token_auth(rf, superuser, api_key, backend):
token = utils.create_access_jwt(api_key.client_id)
req = rf.get("/foo", HTTP_AUTHORIZATION=utils.auth_header(token))
user, token_data = backend.authenticate(req)
assert user == superuser
assert token_data["client_id"] == CLIENT_ID
@pytest.mark.django_db
def test_id_token_auth(rf, user, backend):
token = utils.create_id_jwt(user)
req = rf.get("/foo", HTTP_AUTHORIZATION=utils.auth_header(token))
res_user, token_data = backend.authenticate(req)
assert res_user == user
assert token_data["cognito:username"] == "testuser"
@pytest.mark.django_db
def test_that_kid_jwks_misalignment_throws_403(user):
with pytest.raises(exceptions.AuthenticationFailed):
assert validate_jwt(
utils.create_id_jwt(user, key_id="this is not going to work")
)
@pytest.mark.django_db
def test_inactive_users_fail_auth(rf, user, backend):
user.is_active = False
user.save()
with pytest.raises(exceptions.AuthenticationFailed):
token = utils.create_id_jwt(user)
req = rf.get("/foo", HTTP_AUTHORIZATION=utils.auth_header(token))
backend.authenticate(req)
@pytest.mark.django_db
def test_user_impersonation(rf, user, roslindale_leader_user, backend):
user.is_admin = True
user.impersonated_user = roslindale_leader_user
user.save()
u, _ = backend.authenticate(rf.get("/foo", **utils.id_auth(user)))
assert u == roslindale_leader_user
@pytest.mark.django_db
def test_non_admins_cannot_impersonate(rf, user, roslindale_leader_user, backend):
user.is_admin = False
user.impersonated_user = roslindale_leader_user
user.save()
u, _ = backend.authenticate(rf.get("/foo", **utils.id_auth(user)))
assert u != roslindale_leader_user
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-05 20:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('registration', '0002_auto_20160904_0750'),
]
operations = [
migrations.AddField(
model_name='hello',
name='ignore',
field=models.BooleanField(default=False, help_text='Ignore hello request'),
),
]
|
# -*- coding: utf-8 -*-
from .utils import name_parser_factory
class Enum(object):
""" Enum """
def __init__(
self,
plugin_name,
package,
template_env,
pb_enum,
enum_docs,
parent_struct=None):
self._plugin_name = name_parser_factory.create(plugin_name)
self._package = name_parser_factory.create(package)
self._template = template_env.get_template("enum.j2")
self._enum_description = enum_docs['description'].strip(
) if enum_docs else None
self._name = name_parser_factory.create(pb_enum.name)
self._values = []
self._parent_struct = parent_struct
value_id = 0
for value in pb_enum.value:
# If the enum value contains the enum name as a prefix, remove it.
# For example, if the enum "GpsFix" has a value called "GPS_FIX_NO_GPS",
# we remove the prefix and the value becomes "NO_GPS"
tmp_value_name = name_parser_factory.create(value.name)
if tmp_value_name.upper_camel_case.startswith(self._name.upper_camel_case):
value_name = name_parser_factory.create(tmp_value_name.lower_snake_case[len(self._name.lower_snake_case) + 1:])
has_prefix = True
else:
value_name = tmp_value_name
has_prefix = False
self._values.append({'name': value_name, 'description': enum_docs['params'][value_id], 'has_prefix': has_prefix})
value_id += 1
def __repr__(self):
return self._template.render(plugin_name=self._plugin_name,
package=self._package,
enum_description=self._enum_description,
name=self._name,
values=self._values,
parent_struct=self._parent_struct)
@staticmethod
def collect_enums(
plugin_name,
package,
enums,
template_env,
docs,
parent_struct=None):
_enums = {}
enum_id = 0
for enum in enums:
_enums[enum.name] = Enum(plugin_name,
package,
template_env,
enum,
docs['enums'][enum_id] if docs else None,
parent_struct)
enum_id += 1
return _enums
|
import os
import torch
try:
from apex import amp
amp.register_float_function(torch, 'matmul')
except ImportError:
print('No apex for fp16...')
# blend source and target images with the mask: target where mask==1, source elsewhere
def image_combine(source, target, mask):
res = source * (1 - mask) + target * mask # [b,3,h,w]
return res
def load_model(model, amp=None):
g_path = model.g_path + '_last.pth'
if model.config.restore:
if os.path.exists(g_path):
print('Loading %s generator...' % g_path)
data = torch.load(g_path, map_location='cpu')
model.g_model.load_state_dict(data['g_model'])
if model.config.restore:
model.g_opt.load_state_dict(data['g_opt'])
model.iteration = data['iteration']
# for _ in range(model.iteration):
# g_sche.step()
# d_sche.step()
else:
print(g_path, 'not Found')
raise FileNotFoundError
d_path = model.d_path + '_last.pth'
if model.config.restore: # D is only use for training
if os.path.exists(d_path):
print('Loading %s discriminator...' % d_path)
data = torch.load(d_path, map_location='cpu')
model.d_model.load_state_dict(data['d_model'])
if model.config.restore:
model.d_opt.load_state_dict(data['d_opt'])
else:
print(d_path, 'not Found')
raise FileNotFoundError
if amp is not None and model.config.float16 and 'amp' in data:
amp.load_state_dict(data['amp'])
else:
print('No need for discriminator during testing')
return model, amp
def save_model(model, prefix=None, g_opt=None, d_opt=None, amp=None, iteration=None, n_gpu=1):
if prefix is not None:
save_g_path = model.g_path + "_{}.pth".format(prefix)
save_d_path = model.d_path + "_{}.pth".format(prefix)
else:
save_g_path = model.g_path + ".pth"
save_d_path = model.d_path + ".pth"
print('\nsaving {}...\n'.format(prefix))
save_g = model.g_model.module if n_gpu > 1 else model.g_model
save_d = model.d_model.module if n_gpu > 1 else model.d_model
torch.save({'iteration': model.iteration if iteration is None else iteration,
'g_model': save_g.state_dict(),
'g_opt': g_opt.state_dict(),
'amp': amp.state_dict() if amp is not None else None},
save_g_path)
torch.save({'d_model': save_d.state_dict(),
'd_opt': d_opt.state_dict()},
save_d_path)
|
from time import time
timer_length = input(
"How many minutes and seconds, enter as minutes seconds, enter 0 if no time in that section, do not use commas: ").split()
timer_length = list(map(float, timer_length))
def conv_seconds(minutes) -> float:
return minutes * 60
total_seconds = conv_seconds(timer_length[0]) + timer_length[1]
input("Press enter to start: ")
time1 = time()
while time() - time1 < total_seconds:
print(round(time() - time1, 2), end="\r")
print("\nDone")
|
import matplotlib.pyplot as plt
import numpy as np
import pyvista as pv
sst = pv.read("pdata_xy_sst_t0.vtk")
cmap = "fire" # colorcet (perceptually accurate) color maps
sargs = dict(
shadow=True,
n_labels=5,
italic=False,
fmt="%.1f",
font_family="courier",
# nan_annotation=True,
vertical=True,
)
p = pv.BackgroundPlotter()
#p.add_mesh(sst, scalars="faces", show_edges=True, cmap=cmap, show_scalar_bar=True)
p.add_mesh_threshold(sst, scalars="lats", invert=True, title="latitude", cmap=cmap, show_edges=True, show_scalar_bar=True, scalar_bar_args=sargs)
p.add_text("C48 Latitude Threshold", font_size=10, shadow=True, font="courier")
p.show_axes()
#p.show_grid()
p.scalar_bar.SetTitle("Latitude")
#p.add_scalar_bar(**sargs)
p.camera_position = "yz"
|
"""
Common signal metrics routines
==============================
ssd
sd
"""
__all__ = ['ssd', 'sd']
from .norms import ssd
from .norms import sd
|
""" QuickUnion is a lazy approach.
*Limitations:*
- Trees can get too tall
- Find too expensive (could be N array accesses)
"""
class QuickUnion:
""" data[i] is parent of i. Root of i is id[id[...id[i]...]] """
def __init__(self, n):
""" Initializing list of size n where value is same as index
Here it means that each node is the root of its own tree
Time Complexity: O(n)
:param n: number of elements
"""
self.data = []
for i in range(n):
self.data.append(i)
def root(self, elem):
""" Finding the root of element
:param elem: element of which root is needed
:return: root of elem
"""
while elem != self.data[elem]:
elem = self.data[elem]
return elem
def connected(self, elem1, elem2):
""" elem1 and elem2 are connected iff they have same root
:param elem1: element 1
:param elem2: element 2
:return: returns true iff two elem1 and elem2 are connected, else
false
:rtype: bool
"""
return self.root(elem1) == self.root(elem2)
def union(self, elem1, elem2):
""" To merge components containing elem1 and elem2, set the id of
elem1's root to the id of elem2's root
:param elem1: element 1
:param elem2: element 2
"""
root_elem1 = self.root(elem1)
root_elem2 = self.root(elem2)
self.data[root_elem1] = root_elem2
def main():
""" operational function """
maze = QuickUnion(10)
maze.union(4, 3)
maze.union(3, 8)
maze.union(6, 5)
maze.union(9, 4)
maze.union(2, 1)
print("is 0-7 connected: ", maze.connected(0, 7))
print("is 8-9 connected: ", maze.connected(8, 9))
maze.union(5, 0)
maze.union(7, 2)
maze.union(6, 1)
maze.union(1, 0)
print("is 0-7 connected: ", maze.connected(0, 7))
if __name__ == "__main__":
main()
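# A common remedy for the "trees can get too tall" limitation noted in the module docstring
# is path compression inside root() (sketch only, not part of the original implementation):
#     while elem != self.data[elem]:
#         self.data[elem] = self.data[self.data[elem]]  # point each node at its grandparent
#         elem = self.data[elem]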
|
import csv
import sys
import os
from PIL import Image
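# Assumed invocation: python setup_ground_truth.py <kaggle_folder>
# where <kaggle_folder> contains written_name_test.csv and the referenced .jpg images.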
def main():
name_of_script = sys.argv[0] # should be setup_ground_truth.py
kaggle_folder = sys.argv[1].strip()
with open(f'{kaggle_folder}/written_name_test.csv', mode='r') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
image_filename = row[0]
if os.path.exists(f'{kaggle_folder}/{image_filename}'): # allows user to use only sample of images if they want
ground_truth_text = row[1]
ground_truth_filename = image_filename.replace("jpg", "gt.txt")
with open(f'{kaggle_folder}/{ground_truth_filename}', mode='w') as ground_truth_file:
ground_truth_file.write(ground_truth_text)
if "png" not in image_filename:
im = Image.open(f'{kaggle_folder}/{image_filename}')
im.save(f'{kaggle_folder}/{image_filename.replace("jpg", "png")}', dpi=(300,300))
if __name__ == "__main__":
main()
|
import nltk
import os
from nltk import word_tokenize
from nltk.stem.lancaster import LancasterStemmer
import numpy
import tflearn
import tensorflow as tf
import random
import json
import pickle
with open("intents.json") as file:
data = json.load(file)
words = []
labels = []
docs_x = []
docs_y = []
for intent in data['intents']:
for pattern in intent['patterns']:
pattern = pattern.lower()
wrds = nltk.word_tokenize(pattern)
words.extend(wrds)
docs_x.append(wrds)
docs_y.append(intent['tag'])
if intent['tag'] not in labels:
labels.append(intent['tag'])
stemmer = LancasterStemmer()
words = [stemmer.stem(w.lower()) for w in words if w not in "?"]
words = sorted(list(set(words)))
labels = sorted(labels)
training = []
output = []
out_empty = [0 for _ in range(len(labels))]
for x, doc in enumerate(docs_x):
bag = []
wrds = [stemmer.stem(w) for w in doc]
for w in words:
if w in wrds:
bag.append(1)
else:
bag.append(0)
output_row = out_empty[:]
output_row[labels.index(docs_y[x])] = 1
training.append(bag)
output.append(output_row)
training = numpy.array(training)
output = numpy.array(output)
with open("data.pickle", "wb") as f:
pickle.dump((words, labels, training, output), f)
tf.reset_default_graph()
net = tflearn.input_data(shape = [None, len(training[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
net = tflearn.regression(net)
model = tflearn.DNN(net)
model.fit(training, output, n_epoch=200, batch_size=8, show_metric=True)
model.save("model.tflearn")
|
""" ************************************************
* fileName: optim.py
* desc: optimizers
* author: mingdeng_cao
* date: 2021/12/06 15:24
* last revised: None
************************************************ """
from torch.optim import Adam, SGD, AdamW
from .build import OPTIMIZER_REGISTRY
# Register the optimizer
OPTIMIZER_REGISTRY.register(Adam)
OPTIMIZER_REGISTRY.register(SGD)
OPTIMIZER_REGISTRY.register(AdamW)
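# Hypothetical usage; the exact lookup API lives in .build and is not shown here:
# optim_cls = OPTIMIZER_REGISTRY.get("Adam")
# optimizer = optim_cls(model.parameters(), lr=1e-4)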
|
# from collections import defaultdict
from django.shortcuts import redirect
from django.contrib.auth.decorators import (
permission_required,
user_passes_test,
login_required,
REDIRECT_FIELD_NAME
)
from django.core.context_processors import csrf
from django.forms import ModelChoiceField, IntegerField
from django.template import RequestContext
# from django.utils.translation import ugettext as _
from pyalp.utils import render_to_response
from pizza import gettext as _
# import djmoney.forms
from vanilla import CreateView
from floppyforms import forms
from pizza.models import Pizza, PizzaOrder
from flags.registry import flag_registry
def deny_if_flag_disabled(flag_name):
def internal(function):
def actual(form, request, *args, **kwargs):
if flag_registry.is_flag_enabled(flag_name):
return function(form, request, *args, **kwargs)
else:
redirect('index')
return actual
return internal
def login_required_with_form(
function=None, redirect_field_name=REDIRECT_FIELD_NAME,
login_url=None):
"""
Decorator for views that checks that the user is logged in, redirecting
to the log-in page if necessary.
"""
def internal(form, request, *args, **kwargs):
def wrapper(*q, **w):
import pudb
pu.db
return function(form, request, *args, **kwargs)
actual_decorator = user_passes_test(
lambda u: u.is_authenticated(),
login_url=login_url,
redirect_field_name=redirect_field_name
)
return actual_decorator(wrapper)(request)
return internal
@login_required
def index(request):
orders = PizzaOrder.objects.filter(orderer=request.user)
return render_to_response(
'ordered_pizzas.html',
{'orders': orders}
)
class OrderForm(forms.Form):
pizza = ModelChoiceField(
Pizza.objects.filter(enabled=True)
)
pizza.label = (
_('desc_pizzaid') +
" (<a href=\"pizza_list.php\">" +
_('link_pizza_list') +
"</a>)"
)
quantity = IntegerField()
quantity.label = _("desc_quantity")
class OrderPizzaView(CreateView):
# @deny_if_flag_disabled('pizza_orders')
# @login_required
def post(self, request):
form = OrderForm(request.POST)
if form.is_valid():
order = PizzaOrder(
pizza=form.cleaned_data['pizza'],
orderer=request.user,
quantity=form.cleaned_data['quantity']
)
order.save()
import pudb
pu.db
return redirect('index')
else:
return render_to_response('order_pizzas.html')
# @deny_if_flag_disabled('pizza_orders')
# @login_required
def get(self, request):
args = csrf(request)
args['order_form'] = OrderForm()
args['pizza_orders_locked'] = flag_registry.is_flag_enabled(
'pizza_orders_locked')
return render_to_response(
'order_pizzas.html', args,
context_instance=RequestContext(request)
)
# @method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(OrderPizzaView, self).dispatch(*args, **kwargs)
# @deny_if_flag_disabled('pizza_orders')
# @login_required
# def order_pizza(request):
# import pudb
# pu.db
# if request.method == 'POST':
# form = OrderForm(request.POST)
# print(form)
# if form.is_valid():
# order = PizzaOrder(
# pizza=form.cleaned_data['pizza'],
# orderer=request.user,
# quantity=form.cleaned_data['quantity']
# )
# order.save()
# return redirect('index')
# else:
# return render_to_response('order_pizzas.html')
# else:
# args = {'order_form': OrderForm()}
# args.update(csrf(request))
# return render_to_response('order_pizzas.html', args)
def pizza_details(request):
args = {
'pizza_options': Pizza.objects.filter(enabled=True)
}
return render_to_response('pizza_list.html', args)
@login_required
@permission_required('pizza.can_access_orders')
def admin_pizza_order_list(request):
orders = PizzaOrder.objects.filter(
paid=True,
delivered=False
)
summary = {}
pizzas = {}
totals = {}
# import pudb
# pu.db
# for order in orders:
# summary[order['id']]['delivered'] += (order['delivered'] * order['quantity'])
# totals['delivered'] += (order['delivered'] * order['quantity'])
# if (order['delivered'] > 0):
# summary[order['id']]['quantity'] += 0
# totals['quantity'] += 0
# else:
# summary[order['id']]['quantity'] += order['quantity']
# totals['quantity'] += order['quantity']
# if order['paid']:
# summary[order['id']]['paid'] += (order['paid'] * order['quantity'])
# totals['paid'] += (order['paid'] * order['quantity'])
# summary[order['id']]['price'] += 0
# totals['price'] += 0
# else:
# summary[order['id']]['paid'] += 0
# totals['paid'] += 0
# summary[order['id']]['price'] += (order['price']*order['quantity'])
# totals['price'] += (order['price']*order['quantity'])
# if order['id'] not in pizzas:
# pizzas[order['id']] = order['pizza']
args = {
'pizza_orders': orders
}
return render_to_response('admin_pizza_order_list.html', args)
|
class Vertex:
def __init__(self,id):
self.attributes = {}
self.attributes['id'] = id
def __str__(self):
return str(self.attributes)
def new_copy(self):
return Vertex(self.attributes['id'])
def set(self,key,value):
self.attributes[key] = value
def get(self,key):
return self.attributes[key]
class Graph:
def __init__(self):
self.vertices = {}
self.id_to_v = {}
self.edge_attributes = {}
def __str__(self):
s = ''
for v in self.vertices:
s += str(v)
s += '\n\n'
return s
def add_vertex(self,v):
self.vertices[v] = []
self.id_to_v[v.get('id')] = v
def adjacent(self,v):
return self.vertices[v]
def add_edge(self,v1,v2, s):
self.vertices[v1].append(v2)
self.edge_attributes[(v1,v2)] = {}
self.edge_attributes[(v1,v2)]['sound'] = s
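# Minimal usage sketch for the classes above (the edge "sound" value is illustrative):
# g = Graph()
# a, b = Vertex(0), Vertex(1)
# g.add_vertex(a)
# g.add_vertex(b)
# g.add_edge(a, b, 'chirp')
# g.adjacent(a)  # -> [b]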
|
#Written for Python 3.4.2
data = [line.rstrip('\n') for line in open("input.txt")]
blacklist = ["ab", "cd", "pq", "xy"]
vowels = ['a', 'e', 'i', 'o', 'u']
def contains(string, samples):
for sample in samples:
if string.find(sample) > -1:
return True
return False
def isNicePart1(string):
if contains(string, blacklist):
return False
vowelcount = 0
for vowel in vowels:
vowelcount += string.count(vowel)
if vowelcount < 3:
return False
for i in range(len(string)-1):
if string[i] == string[i+1]:
return True
return False
def isNicePart2(string):
for i in range(len(string)-3):
pair = string[i]+string[i+1]
if string.find(pair, i+2) > -1:
for j in range(len(string)-2):
if string[j] == string[j+2]:
return True
return False
return False
nicePart1Count = 0
nicePart2Count = 0
for string in data:
if isNicePart1(string):
nicePart1Count += 1
if isNicePart2(string):
nicePart2Count += 1
print(nicePart1Count)
print(nicePart2Count)
#Test cases part1
##print(isNicePart1("ugknbfddgicrmopn"), True)
##print(isNicePart1("aaa"), True)
##print(isNicePart1("jchzalrnumimnmhp"), False)
##print(isNicePart1("haegwjzuvuyypxyu"), False)
##print(isNicePart1("dvszwmarrgswjxmb"), False)
#Test cases part2
##print(isNicePart2("qjhvhtzxzqqjkmpb"), True)
##print(isNicePart2("xxyxx"), True)
##print(isNicePart2("uurcxstgmygtbstg"), False)
##print(isNicePart2("ieodomkazucvgmuy"), False)
|
import threading
class loop(threading.Thread):
def __init__(self, motor, encoder, pid):
threading.Thread.__init__(self)
self._encoder = encoder
self._motor = motor
self._pid = pid
self.sp = 0
self.pv = 0
self._stop_requested = threading.Event()
self.setDaemon(True)
def __enter__(self):
pass
def __exit__(self, exec_type, exec_value, traceback):
self.cancel()
def run(self):
while not self._stop_requested.is_set():
self.pv = self._encoder.RPM()
self.sig = self._pid.calculate(self.pv)
self.err = self._pid._error
self._motor.adjust_power(self.sig)
def cancel(self):
self._stop_requested.set()
self._motor.stop()
self._encoder.cancel()
if __name__ == "__main__":
from read_RPM import reader
from motor import motor
from pid import pid
import threading
import pigpio
import time
pulse_per_revolution = 20
reader_gpio = 24
fwd_gpio = 19
rev_gpio = 20
weight = 0.5
pi = pigpio.pi()
reader = reader(pi, reader_gpio, pulse_per_revolution, weight)
motor = motor(pi, fwd_gpio, rev_gpio)
pid = pid()
pid.setpoint = 150
loop = loop(motor, reader, pid)
RUN_TIME = 30.0
SAMPLE_TIME = 2.0
loop.start()
start = time.time()
while (time.time() - start) < RUN_TIME:
time.sleep(SAMPLE_TIME)
print("RPM={}\tdelta={}\terr={}".format(int(loop.pv+0.5), int(loop.sig+0.5),int(loop.err+0.5)))
loop.cancel()
|
##!/usr/bin/env python3
# By Paulo Natel
# Jan/2021
# pnatel@live.com
# Check README.md for more information
# Importing required libraries
from pathlib import Path
import os.path
import shutil
import random
import logging
import datetime
import csv
# Running as standalone or part of the application
if __name__ == '__main__' or __name__ == 'csv_module':
import app_config as cfg
from loggerinitializer import initialize_logger
import setup as stp
from file_class import Photo
stp.setup()
else:
import engine.app_config as cfg
from engine.loggerinitializer import initialize_logger
from engine.file_class import Photo
cfg.load_config()
initialize_logger(cfg._logPath)
def add_record_csv(recordDict, csv_file):
"""
Append recordDict as a row to csv_file, writing the header when the file is empty
and updating the existing row instead if the filename is already present.
"""
recordDict["counter"] = int(recordDict["counter"]) + 1
with open(csv_file, 'a+') as file:
headers = []
for key in recordDict.keys():
headers.append(key)
writer = csv.DictWriter(file, fieldnames=headers)
# Check if the file is empty and add the header
file.seek(0)
if not len(file.read(100)):
writer.writeheader()
logging.warning('CSV is empty, generating headers')
# Read the file and only write the record if it is not already present
file.seek(0)
read = file.read()
# print(read)
if recordDict["filename"] not in read:
writer.writerow(recordDict)
logging.info('adding row for: ' + recordDict["filename"])
else:
update_record_csv(recordDict["filename"], csv_file,
counter=recordDict["counter"], pruned=False)
logging.debug('File already in CSV: ' + recordDict["filename"])
def read_CSV(csv_file):
"""
Return the rows of csv_file as a list of dicts, one per record.
"""
temp = []
with open(csv_file, 'r') as file:
records = csv.DictReader(file)
for record in records:
temp.append(record)
return temp
def remove_record_csv(filename, csv_file):
"""
Remove the row matching filename from csv_file and return it,
or return False when no matching row exists.
"""
read = read_CSV(csv_file)
for item in read:
if filename in item['filename']:
record_found = item
# print('\n\tremove_record_csv', filename, item['filename'],
# filename in item['filename'])
logging.debug('removing: ' + filename + ' from ' + csv_file)
read.remove(item)
# print('\n\n', read)
with open(csv_file, 'w') as writeFile:
headers = []
for key in read[0].keys():
headers.append(key)
writer = csv.DictWriter(writeFile, fieldnames=headers)
writer.writeheader()
writer.writerows(read)
return record_found
return False
def update_record_csv(filepath, csv_file, **kargs):
"""
Update the row whose filename matches filepath's basename with the given keyword
arguments, rewriting csv_file when anything actually changed.
"""
filename = os.path.basename(filepath)
modified = False
logging.debug(f'update_record_csv param: {filename}, {csv_file}, {kargs}')
records = read_CSV(csv_file)
for record in records:
temp = dict(record)
if filename in temp['filename']:
logging.debug('Found: ' + filename +
' Counter: ' + temp['counter'])
for key, value in temp.items():
if key in kargs and key != 'counter':
# print(f'{key:25}: {value}, {key in kargs}, \
# {temp[key]} != {kargs[key]}')
if temp[key] != str(kargs[key]):
modified = True
temp[key] = kargs[key]
logging.debug(f'{filename} - {key} changing \
from {value} to {kargs[key]}')
else:
logging.error(f'param in record ({temp[key]}) \
is the same as provided: {kargs[key]}')
break
else:
logging.debug(f"{filename} differ than {temp['filename']}")
if modified:
removed_record = remove_record_csv(temp['filename'], csv_file)
if removed_record:
if temp['pruned']:
# adjustment of counter due artificial remove/add
temp['counter'] = int(removed_record['counter']) - 1
logging.info('SUCCESS: ' + temp['filename'] +
' removed from ' + csv_file)
# print('\n\n', temp)
# new = Photo(**temp)
# new.print_photo()
add_record_csv(temp, csv_file)
logging.info(f"{temp['filename']} successfully updated")
else:
logging.info('FAILED: ' + temp['filename']
+ ' NOT removed from ' + csv_file)
else:
logging.error(f"{filename} was NOT changed")
return temp
def add_multiple_csv_records(list_file_paths, csv_file, destination_folder=''):
"""
Create a Photo record for each path in list_file_paths and append it to csv_file.
"""
for file_path in list_file_paths:
record = Photo.byPath(file_path, destination_folder)
add_record_csv(record.asdict(), csv_file)
# IMPURE
def fileTypeTest(file, typeList=cfg._fileType):
'''
Checks whether the file extension is in the list of acceptable extensions.
The default list is defined in config.ini under filetype; pass a different
typeList to bypass the defaults.
'''
if file.endswith(typeList):
logging.debug('extension accepted ' + file)
return True
else:
logging.warning('extension invalid ' + file)
return False
def copyFiles(fileList,
ftype=cfg._fileType,
destination=cfg._destinationFolder,
csv=cfg._csv_destination):
'''
Copy a list of files to the folder
'''
logging.info('Copying ' + str(len(fileList)) + ' files')
# logging.debug(fileList)
for fname in fileList:
if fileTypeTest(fname, ftype):
logging.debug('Copying file ' + fname)
shutil.copy(fname, destination)
add_record_csv(Photo.byPath(fname, destination).asdict(), csv)
def getSizeMB(folder='.'):
'''
returns the size of the folder in bytes
'''
root_directory = Path(folder)
size = sum(f.stat().st_size
for f in root_directory.glob('**/*')
if f.is_file())
return size/(10**6)
# -----------------
# IMPURE
def update_csv_ListOfFiles(dirName, csv_file):
'''
For the given path, get the List of all files in the directory tree
'''
# create a list of files and sub directories
# names in the given directory
listOfFile = os.listdir(dirName)
# Iterate over all the entries
for entry in listOfFile:
# Create full path
fullPath = os.path.join(dirName, entry)
# If entry is a directory then get the list of files in this directory
if os.path.isdir(fullPath):
update_csv_ListOfFiles(fullPath, csv_file)
else:
if fileTypeTest(entry, cfg._fileType):
record = Photo.byPath(fullPath)
add_record_csv(record.asdict(), csv_file)
else:
logging.debug(entry + ' INVALID FILE TYPE ' +
str(cfg._fileType))
if cfg._sourceFolder in dirName:
return rebuild_path_from_csv(csv_file, 'source_folder')
else:
return rebuild_path_from_csv(csv_file, 'destination_folder')
def clear_sample_source(csv_source, csv_destination):
"""
Return the source paths present in csv_source that are not yet listed in csv_destination.
"""
return uncommon(rebuild_path_from_csv(csv_source, 'source_folder'),
rebuild_path_from_csv(csv_destination, 'source_folder'))
# IMPURE
def rebuild_path_from_csv(csv_file, folder):
"""
Rebuild the full file paths stored in csv_file using the given folder column.
"""
temp = []
source = read_CSV(csv_file)
for item in source:
if item[folder][-1] == '/':
temp.append(''.join(item[folder] + item['filename']))
else:
temp.append(''.join(item[folder] + '/' + item['filename']))
logging.debug(f'Returning {len(temp)} path from {csv_file}')
# logging.debug(temp)
return temp
def sorting(filenames, criteria=1, sampleSize=10):
'''
Choosing and Sorting the sample
'''
# print(len(filenames), criteria, sampleSize)
if len(filenames) < sampleSize:
logging.warning('The Sample (' + str(sampleSize) +
') is bigger than the source size (' +
str(len(filenames)) + ')')
sampleSize = int(len(filenames) / 2) + (len(filenames) % 2 > 0)
logging.info('New Sample Size: ' + str(sampleSize))
# sorting criterias
if criteria == 1: # Random pics from source
logging.info('Getting a random set of ' + str(sampleSize) + ' Photos')
try:
sample = random.sample(filenames, sampleSize)
# logging.debug('SAMPLE: ' + str(sample))
return sample
except ValueError as error:
logging.error(error)
return False
# NO OTHER SORTING METHOD IS WORKING :-[
# elif criteria == 2:
# print('Getting a random set of ' + str(cfg._numberOfPics)) # +
# ' Photos with no less than ' + str(cfg._newerPhotos/365) +
# ' years')
# files = sorted(os.listdir(cfg._sourceFolder), key=os.path.getctime)
# # while files[i]. os.path.getctime > cfg._newerPhotos:
# for i in range(cfg._numberOfPics):
# newest = files[-i]
# return random.sample(newest, cfg._numberOfPics)
# elif criteria == 3:
# files = sorted(os.listdir(cfg._sourceFolder),
# key=os.path.getctime, reverse = True)
# return random.sample(files, cfg._numberOfPics)
# elif:
# oldest = files[0]
else:
logging.error('Sorting criteria not met n. of files: ' +
str(len(filenames)))
print('Sorting criteria not met')
# IMPURE
def folderPrunning(folder=cfg._destinationFolder,
csv_file=cfg._csv_destination,
multiplier=1,
upper_limit=cfg._foldersizeUpperLimit,
size=cfg._numberOfPics):
'''
checking size of the destination folder to trigger a cleanup
'''
folderSize = getSizeMB(folder)
logging.info('Destination folder Size ' + str(folderSize) + 'Mb')
if folderSize > upper_limit:
logging.debug('Prunning folder in ' + csv_file)
filenames = rebuild_path_from_csv(csv_file, 'destination_folder')
if len(filenames) > (cfg._numberOfPics * multiplier):
prune = sorting(filenames, 1, size * multiplier)
else:
prune = sorting(filenames, 1, size * int(multiplier/2))
logging.debug(f'To be pruned: {prune}')
for fname in prune:
logging.info('Removing file ' + fname)
filePrunning(fname, csv_file)
if getSizeMB(folder) == folderSize:
logging.error('FOLDER PRUNNING FAILED')
return False
else:
logging.info('Folder Size after prunning ' +
str(getSizeMB(folder)) + 'Mb')
else:
logging.info(str(folderSize) + ' smaller than ' + str(upper_limit))
return True
def filePrunning(_file, csv_file):
try:
temp_dict = update_record_csv(_file, csv_file, pruned=True)
# logging.debug(temp_dict)
if temp_dict['destination_folder'] != '':
os.remove(_file)
# filename, file_extension = os.path.splitext(_file)
# Thumbnail removal changes in index.html
# may require adjustments here:
# os.remove(temp_dict['destination_folder'] + '/thumbnail/' +
# filename + '_200x200_fit_90' + file_extension)
else:
# This code should delete a picture from source
logging.critical('ATTENTION: DELETING ORIGINAL FILE')
# os.remove(str(temp_dict['SOURCE_folder']) + _file)
except OSError as e:
logging.error(e.errno)
logging.error('FILE NOT FOUND ' + _file)
return 'File Not Found: ' + _file
else:
logging.info('file removed ' + _file)
return 'File removed: ' + _file
# ++++++++++++++++++++++++++++++++++++++
# ---------old list_module.py----------
# ++++++++++++++++++++++++++++++++++++++
# https://www.codespeedy.com/find-the-common-elements-in-two-lists-in-python/
# payload = request.get_data().decode("utf-8")
# list = getListOfFiles(cfg._destinationFolder)
def common(lst1, lst2):
if type(lst1) is str:
return common_string_in_list(lst1, lst2)
elif type(lst2) is str:
return common_string_in_list(lst2, lst1)
else:
return list(set(lst1).intersection(lst2))
# return list(set(lst1) & set(lst2))
def uncommon(base_list, special_list):
# remove EOL special string
# base_list = [item.replace('\n', '') for item in base_list]
# special_list = [item.replace('\n', '') for item in special_list]
a = set(base_list)
b = set(special_list)
# print(base_list, a)
# print(special_list, b)
# print(list(a - b))
return list(a - b)
def common_string_in_list(string, list):
new_list = []
for item in list:
if str(item) in string and '/' in str(item):
item = item.split('/')[-1:]
new_list.append(item[0])
elif str(item) in string:
new_list.append(item)
return new_list
def append_multiple_lines(file_name, lines_to_append):
# Open the file in append & read mode ('a+')
with open(file_name, "a+") as file_object:
appendEOL = False
# Move read cursor to the start of file.
file_object.seek(0)
# Check if file is not empty
data = file_object.read(100)
if len(data) > 0:
appendEOL = True
file_object.seek(0)
source = file_object.readlines()
# clear unwanted EOL from original file
source = [item.replace('\n', '') for item in source]
# remove possible duplicates
lines_to_append = uncommon(lines_to_append, source)
# Iterate over each string in the list
for line in lines_to_append:
# If file is not empty then append '\n' before first line for
# other lines always append '\n' before appending line
if appendEOL:
file_object.write("\n")
else:
appendEOL = True
# Append element at the end of file
file_object.write(line)
def reset_config(option=True):
stp.clean_folders(warning=0)
if option:
stp.setup()
else:
# update requirements for packaging prior to upload to GitHub
stp.enhance_requirements()
def common_test():
a = [2, 9, 4, 5]
b = [3, 5, 7, 9]
c = '2,9,4,5'
print('[9, 5] ==', common(a, b))
print('[5, 9] ==', common(b, c))
print('[2, 4] ==', uncommon(a, b))
print(uncommon(b, c))
# --------------------------------
# Main function copy photos based on the parameters selected
# --------------------------------
def copy_job():
logging.info('--------COPY JOB START--------')
start = datetime.datetime.now()
print('Job Start time:', start)
logging.info('Loading list of available photos from: ' + cfg._sourceFolder)
filenames = clear_sample_source(cfg._csv_source, cfg._csv_destination)
if filenames == []:
filenames = update_csv_ListOfFiles(cfg._sourceFolder, cfg._csv_source)
logging.info('Found: ' + str(len(filenames)) + ' available files')
logging.info('choosing and Sorting the sample')
sample = sorting(filenames, cfg._criteria, cfg._numberOfPics)
if sample is not False:
logging.info('-------PRUNNING--------')
if (folderPrunning(multiplier=2)):
logging.info('Number of selected files on the sample: ' +
str(len(sample)))
# keeping source address of all files for backtrack
# append_multiple_lines(cfg._csv_source, sample)
copyFiles(sample)
else:
logging.error('Error! Failed to prune destination folder\n \
NO FILES COPIED.')
logging.info('New folder Size ' +
str(getSizeMB(cfg._destinationFolder)) + 'Mb')
logging.info('-' * 30)
end = datetime.datetime.now()
print('Job finish time:', end)
logging.info('Time elapsed:' + str(end-start) + 'secs')
logging.info('--------COPY JOB END----------')
if __name__ == '__main__':
# photo1 = Photo.byPath('engine/static/demo/source/black-crt-tv-showing-gray-screen-704555.jpg')
# photo2 = Photo.byPath('engine/static/demo/source/bandwidth-close-up-computer-connection-1148820.jpg')
# photo1.print_photo()
# print(photo1.asdict())
# photo1.add_record_csv('data/test.csv')
# photo2.add_record_csv('data/test.csv')
# print(datetime.datetime.fromtimestamp(photo1.datetime))
# print (remove_record_csv('bandwidth-close-up-computer-connection-1148820.jpg', 'data/test.csv'))
# update_record_csv('black-crt-tv-showing-gray-screen-704555.jpg','data/test.csv', favorite = True)
# print(read_CSV('data/test.csv'))
# update_csv_ListOfFiles(cfg._sourceFolder, 'data/test2.csv')
# filePrunning(pat-whelen-BDeSzt-dhxc-unsplash.jpg, csv_file)
# print(clear_sample_source('data/test.csv', 'data/test2.csv'))
copy_job()
|
"""
:author: Thomas Delaet <thomas@delaet.org>
"""
from __future__ import annotations
import json
import struct
from velbusaio.command_registry import register_command
from velbusaio.message import Message
COMMAND_CODE = 0xFB
CHANNEL_NORMAL = 0x00
CHANNEL_INHIBITED = 0x01
CHANNEL_FORCED_ON = 0x02
CHANNEL_DISABLED = 0x03
RELAY_ON = 0x01
RELAY_OFF = 0x00
INTERVAL_TIMER_ON = 0x03
LED_OFF = 0
LED_ON = 1 << 7
LED_SLOW_BLINKING = 1 << 6
LED_FAST_BLINKING = 1 << 5
LED_VERY_FAST_BLINKING = 1 << 4
class RelayStatusMessage(Message):
"""
sent by: VMB4RYLD
received by:
"""
def __init__(self, address=None):
Message.__init__(self)
self.channel = 0
self.disable_inhibit_forced = 0
self.status = 0
self.led_status = 0
self.delay_time = 0
self.set_defaults(address)
def populate(self, priority, address, rtr, data):
"""
:return: None
"""
self.needs_low_priority(priority)
self.needs_no_rtr(rtr)
self.needs_data(data, 7)
self.set_attributes(priority, address, rtr)
self.channel = self.byte_to_channel(data[0])
self.disable_inhibit_forced = data[1]
self.status = data[2]
self.led_status = data[3]
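# the delay time is a 3-byte big-endian value; prepend a zero byte so it can be unpacked as a 4-byte unsigned int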
(self.delay_time,) = struct.unpack(">L", bytes([0]) + data[4:])
def to_json(self):
"""
:return: str
"""
json_dict = self.to_json_basic()
json_dict["channel"] = self.channel
json_dict["disable_inhibit_forced"] = self.disable_inhibit_forced
json_dict["status"] = self.status
json_dict["led_status"] = self.led_status
json_dict["delay_time"] = self.delay_time
return json.dumps(json_dict)
def is_normal(self):
"""
:return: bool
"""
return self.disable_inhibit_forced == CHANNEL_NORMAL
def is_inhibited(self):
"""
:return: bool
"""
return self.disable_inhibit_forced == CHANNEL_INHIBITED
def is_forced_on(self):
"""
:return: bool
"""
return self.disable_inhibit_forced == CHANNEL_FORCED_ON
def is_disabled(self):
"""
:return: bool
"""
return self.disable_inhibit_forced == CHANNEL_DISABLED
def is_on(self):
"""
:return: bool
"""
return self.status == RELAY_ON
def is_off(self):
"""
:return: bool
"""
return self.status == RELAY_OFF
def channel_is_on(self):
"""
:return: bool
"""
return bool((self.status >> (self.channel - 1)) & 1)
def has_interval_timer_on(self):
"""
:return: bool
"""
return self.status == INTERVAL_TIMER_ON
def data_to_binary(self):
"""
:return: bytes
"""
return (
bytes(
[
COMMAND_CODE,
self.channels_to_byte([self.channel]),
self.disable_inhibit_forced,
self.status,
self.led_status,
]
)
+ struct.pack(">L", self.delay_time)[-3:]
)
register_command(COMMAND_CODE, RelayStatusMessage)
|
import json
states_list = ["Alabama","Alaska","Arizona","Arkansas","California","Colorado",
"Connecticut","Delaware","Florida","Georgia","Hawaii","Idaho","Illinois",
"Indiana","Iowa","Kansas","Kentucky","Louisiana","Maine","Maryland",
"Massachusetts","Michigan","Minnesota","Mississippi","Missouri","Montana",
"Nebraska","Nevada","New Hampshire","New Jersey","New Mexico","New York",
"North Carolina","North Dakota","Ohio","Oklahoma","Oregon","Pennsylvania",
"Rhode Island","South Carolina","South Dakota","Tennessee","Texas","Utah",
"Vermont","Virginia","Washington","West Virginia","Wisconsin","Wyoming"]
all_states = {}
for state in states_list:
str_state = state
state ={}
state['Province/State'] = str(str_state)
state['Country/Region'] = "US"
state['Lat'] = "N/A"
state['Long'] = "N/A"
state["1/22/2020"] = 0
state["1/23/2020"] = 0
state["1/24/2020"] = 0
state["1/25/2020"] = 0
state["1/26/2020"] = 0
state["1/27/2020"] = 0
state["1/28/2020"] = 0
state["1/29/2020"] = 0
state["1/30/2020"] = 0
state["1/31/2020"] = 0
state["2/1/2020"] = 0
state["2/2/2020"] = 0
state["2/3/2020"] = 0
state["2/4/2020"] = 0
state["2/5/2020"] = 0
state["2/6/2020"] = 0
state["2/7/2020"] = 0
state["2/8/2020"] = 0
state["2/9/2020"] = 0
state["2/10/2020"] = 0
state["2/11/2020"] = 0
state["2/12/2020"] = 0
state["2/13/2020"] = 0
state["2/14/2020"] = 0
state["2/15/2020"] = 0
state["2/16/2020"] = 0
state["2/17/2020"] = 0
state["2/18/2020"] = 0
state["2/19/2020"] = 0
state["2/20/2020"] = 0
state["2/21/2020"] = 0
state["2/22/2020"] = 0
state["2/23/2020"] = 0
state["2/24/2020"] = 0
state["2/25/2020"] = 0
state["2/26/2020"] = 0
state["2/27/2020"] = 0
state["2/28/2020"] = 0
state["2/29/2020"] = 0
state["3/1/2020"] = 0
state["3/2/2020"] = 0
state["3/3/2020"] = 0
state["3/4/2020"] = 0
state["3/5/2020"] = 0
state["3/6/2020"] = 0
state["3/7/2020"] = 0
state["3/8/2020"] = 0
state["3/9/2020"] = 0
state["3/10/2020"] = 0
state["3/11/2020"] = 0
state["3/12/2020"] = 0
state["3/13/2020"] = 0
state["3/14/2020"] = 0
state["3/15/2020"] = 0
state["3/16/2020"] = 0
state["3/17/2020"] = 0
state["3/18/2020"] = 0
state["3/19/2020"] = 0
state["3/20/2020"] = 0
state["3/21/2020"] = 0
state["3/22/2020"] = 0
state["3/23/2020"] = 0
state["3/24/2020"] = 0
state["3/25/2020"] = 0
state["3/26/2020"] = 0
state["3/27/2020"] = 0
state["3/28/2020"] = 0
state["3/29/2020"] = 0
state["3/30/2020"] = 0
all_states[str(str_state)] = state
with open("counties_pile.json", 'r') as myfile:
data = myfile.read()
CTS_object = json.loads(data)
#print(CTS_object)
#print(all_states)
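# For each county record, turn its cumulative case series into daily new cases and
# add those daily counts to the matching state's per-date totals.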
for county in CTS_object:
state_name = county["Province_State"]
if state_name in states_list:
access_state = all_states[state_name]
#print(access_state)
#list_access_state = list(access_state.items())
list_county = list(county.items())
#print(list_access_state[5][0])
#print(len(list_county))
for y in range(12, len(list_county)):
daily_cases = list_county[y][1] - list_county[y-1][1]
#print(access_state[list_county[y][0]] )
access_state[list_county[y][0]] = access_state[list_county[y][0]] + daily_cases
#print(access_state[list_county[y][0]]) =
#access_state[] = access_state[list_county[y][1]] + daily_cases
#print(list_county[y][0])
#print(all_states)
export_list = []
for x in all_states:
export_list.append(all_states[x])
obj_JSON = json.dumps(export_list)
with open("3-30-daily-states3.json", "w") as outfile:
outfile.write(obj_JSON)
|
import sys
import tkinter as tk
import tkinter.ttk as ttk
from tkinter.constants import *
import Proyect_support
import os
import pyautogui as robot
import time
import webbrowser
def meet_por_web(page):
webbrowser.open(page)
entrar_reunion=965,539
aceptar=1065,225
def abrir(pos,click=1):
robot.moveTo(pos) # .moveTo moves the cursor to the given position
robot.click(clicks=click)
time.sleep(2)
abrir(entrar_reunion)
time.sleep(2)
abrir(aceptar)
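# The functions below drive the Zoom desktop client with pyautogui by clicking hardcoded screen
# coordinates, so they will only work reliably with this particular screen layout and resolution.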
def meet_with_password(codigo,password):
os.startfile(r'C:\Users\Usuario\AppData\Roaming\Zoom\bin\Zoom.exe')
entrar=793,496
desactivar_video=803,616
borrar_nombre=942,537
cambiar_nombre=807,539
ubicacion_codigo=814,479
ubicacion_contraseña=840,483
time.sleep(2)
def abrir(pos,click=1):
robot.moveTo(pos) # .moveTo moves the cursor to the given position
robot.click(clicks=click)
time.sleep(2)
abrir(entrar)
time.sleep(2)
abrir(desactivar_video)
time.sleep(2)
abrir(borrar_nombre)
time.sleep(2)
abrir(cambiar_nombre)
time.sleep(2)
robot.typewrite("91570 ")
time.sleep(2)
abrir(ubicacion_codigo)
time.sleep(2)
robot.typewrite(codigo)
robot.hotkey("enter")
time.sleep(2)
abrir(ubicacion_contraseña)
time.sleep(2)
robot.typewrite(password)
robot.hotkey("enter")
def meet(codigo):
os.startfile(r'C:\Users\Usuario\AppData\Roaming\Zoom\bin\Zoom.exe')
entrar=793,496
desactivar_video=803,616
borrar_nombre=942,537
cambiar_nombre=807,539
ubicacion_codigo=814,479
time.sleep(2)
def abrir(pos,click=1):
robot.moveTo(pos) # .moveTo moves the cursor to the given position
robot.click(clicks=click)
time.sleep(2)
abrir(entrar)
time.sleep(2)
abrir(desactivar_video)
time.sleep(2)
abrir(borrar_nombre)
time.sleep(2)
abrir(cambiar_nombre)
time.sleep(2)
robot.typewrite("91570 ")
time.sleep(2)
abrir(ubicacion_codigo)
time.sleep(2)
robot.typewrite(codigo)
robot.hotkey("enter")
time.sleep(2)
class Toplevel1:
def __init__(self, top=None):
'''This class configures and populates the toplevel window.
top is the toplevel containing window.'''
_bgcolor = '#d9d9d9' # X11 color: 'gray85'
_fgcolor = '#000000' # X11 color: 'black'
_compcolor = '#d9d9d9' # X11 color: 'gray85'
_ana1color = '#d9d9d9' # X11 color: 'gray85'
_ana2color = '#ececec' # Closest X11 color: 'gray92'
top.geometry("250x400+580+316")
top.minsize(120, 1)
top.maxsize(3004, 1920)
top.resizable(1, 1)
top.title("Zoom")
top.configure(background="#000000")
top.configure(highlightbackground="#ffffff")
self.top = top
self.Label2 = tk.Label(self.top)
self.Label2.place(relx=0.04, rely=0.075, height=55, width=47)
self.Label2.configure(background="#0080ff")
self.Label2.configure(compound='left')
self.Label2.configure(disabledforeground="#a3a3a3")
self.Label2.configure(font="-family {Tw Cen MT Condensed Extra Bold} -size 17")
self.Label2.configure(foreground="#000000")
self.Label2.configure(text='''ACO''')
self.Label3 = tk.Label(self.top)
self.Label3.place(relx=0.036, rely=0.43, height=55, width=47)
self.Label3.configure(activeforeground="#000000")
self.Label3.configure(background="#ff0000")
self.Label3.configure(compound='left')
self.Label3.configure(disabledforeground="#a3a3a3")
self.Label3.configure(font="-family {Tw Cen MT Condensed Extra Bold} -size 17")
self.Label3.configure(foreground="#000000")
self.Label3.configure(text='''AGA''')
self.Label3_1 = tk.Label(self.top)
self.Label3_1.place(relx=0.036, rely=0.253, height=55, width=47)
self.Label3_1.configure(activebackground="#f9f9f9")
self.Label3_1.configure(activeforeground="black")
self.Label3_1.configure(background="#00ff80")
self.Label3_1.configure(compound='left')
self.Label3_1.configure(disabledforeground="#a3a3a3")
self.Label3_1.configure(font="-family {Tw Cen MT Condensed Extra Bold} -size 17")
self.Label3_1.configure(foreground="#000000")
self.Label3_1.configure(highlightbackground="#d9d9d9")
self.Label3_1.configure(highlightcolor="black")
self.Label3_1.configure(text='''ASI''')
self.Label3_2 = tk.Label(self.top)
self.Label3_2.place(relx=0.04, rely=0.6, height=55, width=47)
self.Label3_2.configure(activebackground="#f9f9f9")
self.Label3_2.configure(activeforeground="black")
self.Label3_2.configure(background="#ff80ff")
self.Label3_2.configure(compound='left')
self.Label3_2.configure(disabledforeground="#a3a3a3")
self.Label3_2.configure(font="-family {Tw Cen MT Condensed Extra Bold} -size 17")
self.Label3_2.configure(foreground="#000000")
self.Label3_2.configure(highlightbackground="#d9d9d9")
self.Label3_2.configure(highlightcolor="black")
self.Label3_2.configure(text='''FIS''')
self.Label3_2_1 = tk.Label(self.top)
self.Label3_2_1.place(relx=0.04, rely=0.788, height=54, width=47)
self.Label3_2_1.configure(activebackground="#f9f9f9")
self.Label3_2_1.configure(activeforeground="black")
self.Label3_2_1.configure(background="#ff8000")
self.Label3_2_1.configure(compound='left')
self.Label3_2_1.configure(disabledforeground="#a3a3a3")
self.Label3_2_1.configure(font="-family {Tw Cen MT Condensed Extra Bold} -size 17")
self.Label3_2_1.configure(foreground="#000000")
self.Label3_2_1.configure(highlightbackground="#d9d9d9")
self.Label3_2_1.configure(highlightcolor="black")
self.Label3_2_1.configure(text='''SSL''')
self.menubar = tk.Menu(top,font="TkMenuFont",bg=_bgcolor,fg=_fgcolor)
top.configure(menu = self.menubar)
self.practico1 = tk.Button(self.top,command=lambda: meet_with_password('872 4589 0217','197481'))
self.practico1.place(relx=0.268, rely=0.075, height=55, width=77)
self.practico1.configure(activebackground="#ececec")
self.practico1.configure(activeforeground="#000000")
self.practico1.configure(background="#d9d9d9")
self.practico1.configure(compound='left')
self.practico1.configure(disabledforeground="#a3a3a3")
self.practico1.configure(font="-family {Tw Cen MT Condensed Extra Bold} -size 10 -weight bold")
self.practico1.configure(foreground="#000000")
self.practico1.configure(highlightbackground="#d9d9d9")
self.practico1.configure(highlightcolor="black")
self.practico1.configure(pady="0")
self.practico1.configure(relief="solid")
self.practico1.configure(text='''PRACTICO''')
self.teorico1 = tk.Button(self.top,command=lambda: meet_with_password('841 0126 7815','ACO1K822'))
self.teorico1.place(relx=0.612, rely=0.075, height=55, width=77)
self.teorico1.configure(activebackground="#ececec")
self.teorico1.configure(activeforeground="#000000")
self.teorico1.configure(background="#d9d9d9")
self.teorico1.configure(compound='left')
self.teorico1.configure(disabledforeground="#a3a3a3")
self.teorico1.configure(font="-family {Tw Cen MT Condensed Extra Bold} -size 10 -weight bold")
self.teorico1.configure(foreground="#000000")
self.teorico1.configure(highlightbackground="#d9d9d9")
self.teorico1.configure(highlightcolor="black")
self.teorico1.configure(pady="0")
self.teorico1.configure(relief="solid")
self.teorico1.configure(text='''TEORICO''')
self.practico2 = tk.Button(self.top,command=lambda: meet_with_password('872 4589 0217','197481'))
self.practico2.place(relx=0.268, rely=0.253, height=55, width=77)
self.practico2.configure(activebackground="#ececec")
self.practico2.configure(activeforeground="#000000")
self.practico2.configure(background="#d9d9d9")
self.practico2.configure(compound='left')
self.practico2.configure(disabledforeground="#a3a3a3")
self.practico2.configure(font="-family {Tw Cen MT Condensed Extra Bold} -size 10 -weight bold")
self.practico2.configure(foreground="#000000")
self.practico2.configure(highlightbackground="#d9d9d9")
self.practico2.configure(highlightcolor="black")
self.practico2.configure(pady="0")
self.practico2.configure(relief="solid")
self.practico2.configure(text='''PRACTICO''')
self.practico3 = tk.Button(self.top,command=lambda:meet_por_web('https://utn.zoom.us/j/9101918470?pwd=Z1dKSTZXaVJIbGY4UllsK1gxdDFQZz09#success'))
self.practico3.place(relx=0.268, rely=0.43, height=55, width=77)
self.practico3.configure(activebackground="#ececec")
self.practico3.configure(activeforeground="#000000")
self.practico3.configure(background="#d9d9d9")
self.practico3.configure(compound='left')
self.practico3.configure(disabledforeground="#a3a3a3")
self.practico3.configure(font="-family {Tw Cen MT Condensed Extra Bold} -size 10 -weight bold")
self.practico3.configure(foreground="#000000")
self.practico3.configure(highlightbackground="#d9d9d9")
self.practico3.configure(highlightcolor="black")
self.practico3.configure(pady="0")
self.practico3.configure(relief="solid")
self.practico3.configure(text='''PRACTICO''')
self.practico4 = tk.Button(self.top,command=lambda: meet_with_password('881 7790 9926','1'))
self.practico4.place(relx=0.28, rely=0.6, height=55, width=77)
self.practico4.configure(activebackground="#ececec")
self.practico4.configure(activeforeground="#000000")
self.practico4.configure(background="#d9d9d9")
self.practico4.configure(compound='left')
self.practico4.configure(disabledforeground="#a3a3a3")
self.practico4.configure(font="-family {Tw Cen MT Condensed Extra Bold} -size 10 -weight bold")
self.practico4.configure(foreground="#000000")
self.practico4.configure(highlightbackground="#d9d9d9")
self.practico4.configure(highlightcolor="black")
self.practico4.configure(pady="0")
self.practico4.configure(relief="solid")
self.practico4.configure(text='''PRACTICO''')
self.practico5 = tk.Button(self.top,command=lambda: meet_with_password('819 6405 2505','875739'))
self.practico5.place(relx=0.268, rely=0.788, height=55, width=77)
self.practico5.configure(activebackground="#ececec")
self.practico5.configure(activeforeground="#000000")
self.practico5.configure(background="#d9d9d9")
self.practico5.configure(compound='left')
self.practico5.configure(disabledforeground="#a3a3a3")
self.practico5.configure(font="-family {Tw Cen MT Condensed Extra Bold} -size 10 -weight bold")
self.practico5.configure(foreground="#000000")
self.practico5.configure(highlightbackground="#d9d9d9")
self.practico5.configure(highlightcolor="black")
self.practico5.configure(pady="0")
self.practico5.configure(relief="solid")
self.practico5.configure(text='''PRACTICO''')
self.teorico2 = tk.Button(self.top,command=lambda: meet_with_password('819 1021 4476','853835'))
self.teorico2.place(relx=0.612, rely=0.253, height=55, width=77)
self.teorico2.configure(activebackground="#ececec")
self.teorico2.configure(activeforeground="#000000")
self.teorico2.configure(background="#d9d9d9")
self.teorico2.configure(compound='left')
self.teorico2.configure(disabledforeground="#a3a3a3")
self.teorico2.configure(font="-family {Tw Cen MT Condensed Extra Bold} -size 10 -weight bold")
self.teorico2.configure(foreground="#000000")
self.teorico2.configure(highlightbackground="#d9d9d9")
self.teorico2.configure(highlightcolor="black")
self.teorico2.configure(pady="0")
self.teorico2.configure(relief="solid")
self.teorico2.configure(text='''TEORICO''')
self.teorico3 = tk.Button(self.top,command=lambda: meet_with_password('890 2339 1228','794613'))
self.teorico3.place(relx=0.612, rely=0.43, height=55, width=77)
self.teorico3.configure(activebackground="#ececec")
self.teorico3.configure(activeforeground="#000000")
self.teorico3.configure(background="#d9d9d9")
self.teorico3.configure(compound='left')
self.teorico3.configure(disabledforeground="#a3a3a3")
self.teorico3.configure(font="-family {Tw Cen MT Condensed Extra Bold} -size 10 -weight bold")
self.teorico3.configure(foreground="#000000")
self.teorico3.configure(highlightbackground="#d9d9d9")
self.teorico3.configure(highlightcolor="black")
self.teorico3.configure(pady="0")
self.teorico3.configure(relief="solid")
self.teorico3.configure(text='''TEORICO''')
self.teorico4 = tk.Button(self.top,command=lambda: meet_with_password('836 8074 3547','675579'))
self.teorico4.place(relx=0.612, rely=0.6, height=55, width=77)
self.teorico4.configure(activebackground="#ececec")
self.teorico4.configure(activeforeground="#000000")
self.teorico4.configure(background="#d9d9d9")
self.teorico4.configure(compound='left')
self.teorico4.configure(disabledforeground="#a3a3a3")
self.teorico4.configure(font="-family {Tw Cen MT Condensed Extra Bold} -size 10 -weight bold")
self.teorico4.configure(foreground="#000000")
self.teorico4.configure(highlightbackground="#d9d9d9")
self.teorico4.configure(highlightcolor="black")
self.teorico4.configure(pady="0")
self.teorico4.configure(relief="solid")
self.teorico4.configure(text='''TEORICO''')
self.teorico5 = tk.Button(self.top,command=lambda: meet('828 3051 3336'))
self.teorico5.place(relx=0.612, rely=0.788, height=55, width=77)
self.teorico5.configure(activebackground="#ececec")
self.teorico5.configure(activeforeground="#000000")
self.teorico5.configure(background="#d9d9d9")
self.teorico5.configure(compound='left')
self.teorico5.configure(disabledforeground="#a3a3a3")
self.teorico5.configure(font="-family {Tw Cen MT Condensed Extra Bold} -size 10 -weight bold")
self.teorico5.configure(foreground="#000000")
self.teorico5.configure(highlightbackground="#d9d9d9")
self.teorico5.configure(highlightcolor="black")
self.teorico5.configure(pady="0")
self.teorico5.configure(relief="solid")
self.teorico5.configure(text='''TEORICO''')
def start_up():
Proyect_support.main()
if __name__ == '__main__':
Proyect_support.main()
|
#! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# 2014 Gael Varoquaux
# 2014-2016 Sergei Lebedev <superbobry@gmail.com>
"""Hidden Markov Models in Python with scikit-learn like API"""
import sys
try:
from numpy.distutils.misc_util import get_info
except ImportError:
# A dirty hack to get RTD running.
def get_info(name):
return {}
from setuptools import setup, Extension
DISTNAME = "hmmlearn"
DESCRIPTION = __doc__
LONG_DESCRIPTION = open("README.rst").read()
MAINTAINER = "Sergei Lebedev"
MAINTAINER_EMAIL = "superbobry@gmail.com"
LICENSE = "new BSD"
CLASSIFIERS = [
"Development Status :: 3 - Alpha",
"License :: OSI Approved",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Topic :: Software Development",
"Topic :: Scientific/Engineering",
"Programming Language :: Cython",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
]
import hmmlearn
VERSION = hmmlearn.__version__
install_requires = ["sklearn>=0.16"]
tests_require = install_requires + ["pytest"]
docs_require = install_requires + [
"Sphinx", "sphinx-gallery", "numpydoc", "Pillow", "matplotlib"
]
setup_options = dict(
name="hmmlearn",
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
license=LICENSE,
url="https://github.com/hmmlearn/hmmlearn",
packages=["hmmlearn", "hmmlearn.tests"],
classifiers=CLASSIFIERS,
ext_modules=[
Extension("hmmlearn._hmmc", ["hmmlearn/_hmmc.c"],
extra_compile_args=["-O3"],
**get_info("npymath"))
],
tests_require=tests_require,
extras_require={
"tests": tests_require,
"docs": docs_require
}
)
if __name__ == "__main__":
setup(**setup_options)
|
#!/usr/bin/env python3.5
import defparser
import sys
import argparse
import blifparser
import utils
import datetime
import extractrc
from spefstruct import SPEF
from defstruct import DEF
from modtypes import Unit
from libstruct import LIB
from lefstruct import LEF
def get_parsers():
"""docstring for get_parsers"""
return (defparser.defparser(),blifparser.blifparser())
def parse_to_structs(DEFparser,DEFlexer, BLIFparser, BLIFlexer, DEF_txt, BLIF_txt):
DEF_datastruct = DEFparser.parse(DEF_txt,lexer=DEFlexer)
BLIF_datastruct = BLIFparser.parse(BLIF_txt,lexer=BLIFlexer)
return (DEF_datastruct ,BLIF_datastruct)
def get_files_txts(args):
def aux(file_name): # Auxiliary function: read and return the file's full contents
with open(file_name,'r') as txt_tobeparsed:
return txt_tobeparsed.read()
DEF_txt=aux(args.DEF)
BLIF_txt=aux(args.BLIF)
LIB_txt=aux(args.LIB)
LEF_txt=aux(args.LEF)
return (DEF_txt, BLIF_txt, LIB_txt, LEF_txt)
def construct_SPEF_file(DEF_datastruct, BLIF_datastruct, units, LEF_txt, LIB_txt):
DEF_declarations = DEF_datastruct.decs
stdnum = utils.quoteit("IEEE 1481-1998")
design = utils.quoteit(str(DEF_declarations.get_design()))
todaydate = utils.quoteit(str(datetime.date.today()))
vendor = utils.quoteit("Intended Tool")
program = utils.quoteit("def2spef")
fileversion = utils.quoteit("0.1")
designflow = utils.quoteit("Design_Flow")
dividerchar = DEF_declarations.get_dividerchar()
delimiterchar = ":"
busdelimiterchar = DEF_declarations.get_busbitchars()
tunit = "1 PS"
cunit = "1 PF"
runit = "1 OHM"
lunit = "1 UH"
decl_list = [
    SPEF.Header.StdNum(stdnum), SPEF.Header.DesName(design),
    SPEF.Header.CreDate(todaydate), SPEF.Header.Vendor(vendor),
    SPEF.Header.Prog(program), SPEF.Header.FVer(fileversion),
    SPEF.Header.DesFlow(designflow), SPEF.Header.DivChar(dividerchar),
    SPEF.Header.DelimChar(delimiterchar), SPEF.Header.BusDelimChar(busdelimiterchar),
    SPEF.Header.TUnit(tunit), SPEF.Header.CUnit(cunit),
    SPEF.Header.RUnit(runit), SPEF.Header.LUnit(lunit),
]
header = SPEF.Header(decl_list)
internal = extractrc.ext_rc(header, DEF_datastruct, BLIF_datastruct, LEF_txt, LIB_txt)
return SPEF(header, internal)
if __name__=="__main__":
args_parser = argparse.ArgumentParser(prog='def2spef.py',description="""Converts DEF files to SPEF files via parasitics extraction, given LIB, LEF and BLIF files.\n
\nIf no output file is specified, a SPEF file is created with the DEF file name, extension included""")
args_parser.add_argument('DEF', help='Design Exchange Format file name')
args_parser.add_argument('LEF', help='Library Exchange Format file name')
args_parser.add_argument('LIB', help='Liberty file name')
args_parser.add_argument('BLIF', help='Berkeley Logic Interchange Format file name')
args_parser.add_argument('-out', help='output file name')
args = args_parser.parse_args(sys.argv[1:])
(DEFparser,DEFlexer),( BLIFparser,BLIFlexer) = get_parsers()
DEF_txt, BLIF_txt, LIB_txt, LEF_txt = get_files_txts(args)
# Optional Argument
if args.out:
SPEF_handler = open(args.out, 'w')
else:
print("here and the out file name is ", args.DEF+'.spef')
SPEF_handler = open(args.DEF+'.spef', 'w')
DEF_datastruct,BLIF_datastruct = parse_to_structs(DEFparser,DEFlexer, BLIFparser, BLIFlexer, DEF_txt, BLIF_txt)
units = LIB.get_all_units(LIB_txt)
SPEF_datastruct = construct_SPEF_file(DEF_datastruct, BLIF_datastruct,units, LEF_txt, LIB_txt)
utils.write_any(SPEF_datastruct, SPEF_handler)
LEF.get_database_unit(LEF_txt)
|
# Uncomment for Challenge #7
import datetime
import random
from redis.client import Redis
from redisolar.dao.base import RateLimiterDaoBase
from redisolar.dao.redis.base import RedisDaoBase
from redisolar.dao.redis.key_schema import KeySchema
# Uncomment for Challenge #7
from redisolar.dao.base import RateLimitExceededException
class SlidingWindowRateLimiter(RateLimiterDaoBase, RedisDaoBase):
"""A sliding-window rate-limiter."""
def __init__(self,
window_size_ms: float,
max_hits: int,
redis_client: Redis,
key_schema: KeySchema = None,
**kwargs):
self.window_size_ms = window_size_ms
self.max_hits = max_hits
super().__init__(redis_client, key_schema, **kwargs)
def hit(self, name: str):
"""Record a hit using the rate-limiter."""
# START Challenge #7
key = self.key_schema.sliding_window_rate_limiter_key(name, int(self.window_size_ms), self.max_hits)
pipeline = self.redis.pipeline(transaction=False)
timestamp = datetime.datetime.now().timestamp() * 1000
score = timestamp
value = f'{score}-{str(random.randint(0, 2**32))}'
# Step 1: record this hit as a unique member scored by its timestamp
pipeline.zadd(key, {value: score})
# Step 2: drop hits that fall before the start of the sliding window
pipeline.zremrangebyscore(key, -1, timestamp - self.window_size_ms)
# Step 3: count the hits remaining inside the window
pipeline.zcard(key)
hits = pipeline.execute()[2]
if hits > self.max_hits:
raise RateLimitExceededException()
# END Challenge #7
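# A minimal usage sketch (assuming a local Redis and this project's KeySchema), for illustration only:
#   limiter = SlidingWindowRateLimiter(window_size_ms=1000, max_hits=5,
#                                      redis_client=Redis(), key_schema=KeySchema())
#   limiter.hit("my-endpoint")  # raises RateLimitExceededException once 5 hits land within 1 s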
|
"""
This is a python script to add users to groups in a Weblogic Domain that uses the embedded LDAP.
The script needs to connect to a running Weblogic Admin Server.
This script requires a comma-separated-values (csv) file containing the users you wish to add
to the groups, in the following format:
groupname, username
Both values are required.
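For example, a line such as "Administrators, jdoe" (hypothetical names) assigns
user jdoe to the Administrators group.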
The script will try to use the following default values:
- Csv file name: will match the script file name with .csv extension.
For example: add_users_to_groups.csv. If not defined in the environment
variables, the script will try to look at the same location where the script is
running.
- Weblogic admin username: the weblogic admin user to connect to the admin server.
Default value: weblogic
- Weblogic admin user password: admin user password to connect to the admin server.
Default value: Welcome1
- Weblogic admin server URL: the admin server url and port, in the following format:
t3://HOSTNAME:PORT. Default value: t3://localhost:7001
You can override the defaults by setting the following environment variables:
CSV_FILE - The full path to the csv file containing users
WLS_USER - The weblogic admin user used to connect to admin server
WLS_PASS - The weblogic admin user password
WLS_URL - The weblogic admin server URL.
To invoke this script, simply call WLST passing the script full path as argument.
For example:
/home/oracle/MW_HOME/common/bin/wlst.sh add_users_to_groups.py
"""
import os, sys, fileinput
from weblogic.management.security.authentication import GroupEditorMBean
print '---- WLST User-Group Assignment Start ----\n'
# Get the current path of the script and build the users cvs file name
# assuming they are in the same directory
dir_name = os.path.dirname(sys.argv[0])
file_name = os.path.splitext(os.path.basename(sys.argv[0]))[0] + '.csv'
csv_file = os.path.join(dir_name, file_name)
# Location of the csv file, if not set will use users.csv
csv_file = os.environ.get('CSV_FILE', csv_file)
# Weblogic admin user, if not set will use weblogic
wls_user = os.environ.get('WLS_USER', 'weblogic')
# Weblogic admin password, if not set will use Welcome1
wls_password = os.environ.get('WLS_PASS', 'Welcome1')
# Weblogic Admin Server URL, if not set will use t3://localhost:7001
wls_url = os.environ.get('WLS_URL', 't3://localhost:7001')
print 'Groups file to process: \'' + csv_file + '\'\n'
# Connects to WLS Admin Server
connect(wls_user, wls_password, wls_url)
# Obtains the AuthenticatorProvider MBean
atnr = cmo.getSecurityConfiguration().getDefaultRealm().lookupAuthenticationProvider("DefaultAuthenticator")
group = ''
username = ''
try:
print 'Starting user-group assignment\n'
# Read the csv file
for line in fileinput.input(csv_file):
# Split the line by comma
i = line.split(',')
# Get the group name
group = i[0].strip()
# Get the username
username = i[1].strip()
# If group and user exist
if atnr.groupExists(group) and atnr.userExists(username):
print 'Adding user \'' + username + '\' to group \'' + group + '\'...'
try:
# Add user to group
atnr.addMemberToGroup(group, username)
except weblogic.management.utils.InvalidParameterException, ie:
print('Error while adding user to group')
print str(ie)
pass
print 'Adding user \'' + username + '\' to group \'' + group + '\' successfully!\n'
else:
print 'Group \'' + group + '\' or user \'' + username + '\' does not exist, skipping...\n'
except StandardError, e:
print 'Exception raised: ' + str(e)
print 'Terminating script...'
print '---- WLST User-Group Assignment End ----'
|
# -*- coding: utf-8 -*-
#
# Copyright 2018-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""BigMLer dispatcher for subcommands
"""
from bigmler.dispatcher import main_dispatcher
from bigmler.sample.dispatcher import sample_dispatcher
from bigmler.analyze.dispatcher import analyze_dispatcher
from bigmler.cluster.dispatcher import cluster_dispatcher
from bigmler.anomaly.dispatcher import anomaly_dispatcher
from bigmler.delete.dispatcher import delete_dispatcher
from bigmler.report.dispatcher import report_dispatcher
from bigmler.reify.dispatcher import reify_dispatcher
from bigmler.project.dispatcher import project_dispatcher
from bigmler.association.dispatcher import association_dispatcher
from bigmler.logisticregression.dispatcher import logistic_regression_dispatcher
from bigmler.linearregression.dispatcher import linear_regression_dispatcher
try:
from bigmler.topicmodel.dispatcher import topic_model_dispatcher
NO_STEMMER = False
except ImportError:
NO_STEMMER = True
from bigmler.timeseries.dispatcher import time_series_dispatcher
from bigmler.deepnet.dispatcher import deepnet_dispatcher
from bigmler.execute.dispatcher import execute_dispatcher
from bigmler.whizzml.dispatcher import whizzml_dispatcher
from bigmler.export.dispatcher import export_dispatcher
from bigmler.retrain.dispatcher import retrain_dispatcher
from bigmler.pca.dispatcher import pca_dispatcher
from bigmler.fusion.dispatcher import fusion_dispatcher
from bigmler.dataset.dispatcher import dataset_dispatcher
from bigmler.externalconnector.dispatcher import connector_dispatcher
def subcommand_dispatcher(subcommand, args):
""" Calls the corresponding subcommand dispatcher
"""
return globals()["%s_dispatcher" % subcommand.replace("-", "_")](args)
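# For example, a subcommand name such as "logistic-regression" resolves to
# logistic_regression_dispatcher(args) through the globals() lookup above.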
|
import daft
with daft.PGM() as pgm:
pgm.add_node(
daft.Node(
"d",
"D",
0,
0,
plot_params=dict(fc="white"),
fontsize=17,
offset=(0, -1),
label_params=dict(fontweight="bold"),
)
)
pgm.render()
pgm.figure.patch.set_facecolor("none")
pgm.ax.patch.set_facecolor("none")
pgm.figure.savefig("favicon.png", transparent=True)
|
# -*- coding:utf-8 -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
STOP_RENDERING = runtime.STOP_RENDERING
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1443802885.3088007
_enable_loop = True
_template_filename = '/usr/local/lib/python3.4/dist-packages/nikola/data/themes/base/templates/comments_helper_disqus.tmpl'
_template_uri = 'comments_helper_disqus.tmpl'
_source_encoding = 'utf-8'
_exports = ['comment_link_script', 'comment_form', 'comment_link']
import json
def render_body(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
__M_writer = context.writer()
__M_writer('\n')
__M_writer('\n\n')
__M_writer('\n\n')
__M_writer('\n\n\n')
__M_writer('\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_comment_link_script(context):
__M_caller = context.caller_stack._push_frame()
try:
comment_system_id = context.get('comment_system_id', UNDEFINED)
__M_writer = context.writer()
__M_writer('\n')
if comment_system_id:
__M_writer(' <script>var disqus_shortname="')
__M_writer(str(comment_system_id))
__M_writer('";(function(){var a=document.createElement("script");a.async=true;a.src="//"+disqus_shortname+".disqus.com/count.js";(document.getElementsByTagName("head")[0]||document.getElementsByTagName("body")[0]).appendChild(a)}());</script>\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_comment_form(context,url,title,identifier):
__M_caller = context.caller_stack._push_frame()
try:
comment_system_id = context.get('comment_system_id', UNDEFINED)
lang = context.get('lang', UNDEFINED)
__M_writer = context.writer()
__M_writer('\n')
if comment_system_id:
__M_writer(' <div id="disqus_thread"></div>\n <script>\n var disqus_shortname ="')
__M_writer(str(comment_system_id))
__M_writer('",\n')
if url:
__M_writer(' disqus_url="')
__M_writer(str(url))
__M_writer('",\n')
__M_writer(' disqus_title=')
__M_writer(str(json.dumps(title)))
__M_writer(',\n disqus_identifier="')
__M_writer(str(identifier))
__M_writer('",\n disqus_config = function () {\n')
if lang == 'es':
__M_writer(' this.language = "es_ES";\n')
else:
__M_writer(' this.language = "')
__M_writer(str(lang))
__M_writer('";\n')
__M_writer(' };\n (function() {\n var dsq = document.createElement(\'script\'); dsq.async = true;\n dsq.src = \'//\' + disqus_shortname + \'.disqus.com/embed.js\';\n (document.getElementsByTagName(\'head\')[0] || document.getElementsByTagName(\'body\')[0]).appendChild(dsq);\n })();\n </script>\n <noscript>Please enable JavaScript to view the <a href="//disqus.com/?ref_noscript" rel="nofollow">comments powered by Disqus.</a></noscript>\n <a href="//disqus.com" class="dsq-brlink" rel="nofollow">Comments powered by <span class="logo-disqus">Disqus</span></a>\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_comment_link(context,link,identifier):
__M_caller = context.caller_stack._push_frame()
try:
comment_system_id = context.get('comment_system_id', UNDEFINED)
__M_writer = context.writer()
__M_writer('\n')
if comment_system_id:
__M_writer(' <a href="')
__M_writer(str(link))
__M_writer('#disqus_thread" data-disqus-identifier="')
__M_writer(str(identifier))
__M_writer('">Comments</a>\n')
return ''
finally:
context.caller_stack._pop_frame()
"""
__M_BEGIN_METADATA
{"uri": "comments_helper_disqus.tmpl", "source_encoding": "utf-8", "filename": "/usr/local/lib/python3.4/dist-packages/nikola/data/themes/base/templates/comments_helper_disqus.tmpl", "line_map": {"16": 3, "18": 0, "23": 2, "24": 3, "25": 31, "26": 37, "27": 44, "33": 40, "38": 40, "39": 41, "40": 42, "41": 42, "42": 42, "48": 5, "54": 5, "55": 6, "56": 7, "57": 9, "58": 9, "59": 10, "60": 11, "61": 11, "62": 11, "63": 13, "64": 13, "65": 13, "66": 14, "67": 14, "68": 16, "69": 17, "70": 18, "71": 19, "72": 19, "73": 19, "74": 21, "80": 33, "85": 33, "86": 34, "87": 35, "88": 35, "89": 35, "90": 35, "91": 35, "97": 91}}
__M_END_METADATA
"""
|
from aiohttp import web
async def handle(request):
name = request.match_info.get('name', "Anonymous")
text = "Hello, " + name
return web.Response(text=text)
app = web.Application()
app.add_routes([web.get('/', handle),
web.get('/{name}', handle)])
if __name__ == '__main__':
web.run_app(app)
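# With the server running locally (web.run_app listens on port 8080 by default):
#   curl http://localhost:8080/        -> Hello, Anonymous
#   curl http://localhost:8080/Alice   -> Hello, Alice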
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-09-21 10:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('data', '0067_attachmentrequest_timestamp'),
]
operations = [
migrations.AddField(
model_name='attachmentfile',
name='external_id',
field=models.CharField(db_index=True, max_length=50, null=True),
),
]
|
import os
import tensorflow as tf
import sonnet as snt
from luminoth.datasets.exceptions import InvalidDataDirectory
class BaseDataset(snt.AbstractModule):
def __init__(self, config, **kwargs):
super(BaseDataset, self).__init__(**kwargs)
self._dataset_dir = config.dataset.dir
self._num_epochs = config.train.num_epochs
self._batch_size = config.train.batch_size
self._split = config.dataset.split
self._random_shuffle = config.train.random_shuffle
self._seed = config.train.seed
self._fixed_resize = (
'fixed_height' in config.dataset.image_preprocessing and
'fixed_width' in config.dataset.image_preprocessing
)
if self._fixed_resize:
self._image_fixed_height = (
config.dataset.image_preprocessing.fixed_height
)
self._image_fixed_width = (
config.dataset.image_preprocessing.fixed_width
)
self._total_queue_ops = 20
def _build(self):
# Find split file from which we are going to read.
split_path = os.path.join(
self._dataset_dir, '{}.tfrecords'.format(self._split)
)
if not tf.gfile.Exists(split_path):
raise InvalidDataDirectory(
'"{}" does not exist.'.format(split_path)
)
# String input producer allows for a variable number of files to read
# from. We just know we have a single file.
filename_queue = tf.train.string_input_producer(
[split_path], num_epochs=self._num_epochs, seed=self._seed
)
# Define reader to parse records.
reader = tf.TFRecordReader()
_, raw_record = reader.read(filename_queue)
values, dtypes, names = self.read_record(raw_record)
if self._random_shuffle:
queue = tf.RandomShuffleQueue(
capacity=100,
min_after_dequeue=0,
dtypes=dtypes,
names=names,
name='tfrecord_random_queue',
seed=self._seed
)
else:
queue = tf.FIFOQueue(
capacity=100,
dtypes=dtypes,
names=names,
name='tfrecord_fifo_queue'
)
# Generate queueing ops for QueueRunner.
enqueue_ops = [queue.enqueue(values)] * self._total_queue_ops
self.queue_runner = tf.train.QueueRunner(queue, enqueue_ops)
tf.train.add_queue_runner(self.queue_runner)
return queue.dequeue()
|
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotFound, JsonResponse
from django.shortcuts import render
from .ady_API import *
from .secrets import secrets
import json
# Fixed variables
value = 1500
currency = "EUR"
# Default views - Redirect to the next page
def index(request):
return HttpResponseRedirect("/country/")
# Simple pre-checkout pages
def country(request):
return render(request, 'base/country.html')
def cart(request):
return render(request, 'base/cart.html')
## API
# /payments API implementation
def ady_api_payments(request):
if request.method == 'POST':
requestData = ady_payments(request, value, currency)
return JsonResponse(requestData.message)
else:
return HttpResponseNotFound('<p>Incorrect data</p>')
# /payments/details API implementation
def ady_api_payments_details(request):
if request.method == 'POST':
requestData = ady_payments_details(request)
return JsonResponse(requestData.message)
else:
return HttpResponseNotFound('<p>Incorrect data</p>')
## CHECKOUT EXPERIENCE
# Present Adyen Drop-in
def payment_checkout(request, country_code):
return render(request, 'base/payment_checkout.html', context=
{
'paymentMethods': ady_paymentMethods(country_code, value, currency).message,
'originKey': secrets()['originKey'],
})
# Parse and process the return URL from Adyen
def payment_processing(request):
if request.method == "GET":
details = json.dumps(request.GET)
elif request.method == "POST":
details = json.dumps(request.POST)
else:
return HttpResponseNotFound('<p>Unsupported request method</p>') # neither a GET nor a POST
return render(request, 'base/payment_processing.html', context=
{
'ady_details': details,
})
# Result pages
def payment_success(request):
return render(request, 'base/payment_success.html')
def payment_pending(request):
return render(request, 'base/payment_pending.html')
def payment_error(request, reason):
return render(request, 'base/payment_error.html', context={
'reason': reason,
})
|
from bs4 import BeautifulSoup
from selenium import webdriver
from time import sleep
import os
from datetime import *
from pathlib import Path
from openpyxl.workbook.workbook import Workbook
from openpyxl import load_workbook
from openpyxl.styles import Font
from openpyxl.utils import get_column_letter
# Set the path of your Selenium Chrome driver in the PATH variable below
PATH = r"enter path here"
# You can uncomment the option below if you want Chrome to run headless (invisible)
options = webdriver.ChromeOptions()
options.add_argument("--enable-javascript")
#options.add_argument("--headless")
options.add_argument("user-agent=Mozilla/5.0 (Windows Phone 10.0; Android 4.2.1; Microsoft; Lumia 640 XL LTE) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Mobile Safari/537.36 Edge/12.10166")
options.add_argument('--log-level=3')
counter = 1
crypto_list = []
discarded = [] # list containing data of rugpulls crypto
no_rugpulls = [] # list containing data of crypto which are not rugpulls
no_honeypots = []
def get_data(counter):
dr = webdriver.Chrome(executable_path=PATH, chrome_options=options)
dr.get("https://poocoin.app/ape")
print("Waiting 25 sec for the content to load")
sleep(25)
bs = BeautifulSoup(dr.page_source, "html.parser") #scraping poocoin.app/ape
tbody = bs.tbody
trs = tbody.contents
for tr in trs:
name, links = tr.contents[0:3:2]
fixed_name = name.a.text
for names in tr.contents[0::3]:
name_link = (str(names)).split("=")
for k in name_link:
if k[0:8] == "\"/tokens":
namelinks = k
else:
pass
tryingfix_links = namelinks.split("\"")[1]
fixed_name_links = "https://poocoin.app" + tryingfix_links
for contracts in links.contents[0::2]:
for bnbholders in links.contents[1::2]:
cntr = (str(contracts)).split("=") #trying to fix all names
for i in cntr:
if i[0:5] == "\"http" and i[21] == "t":
contract_link = i[26:-9]
elif i[0:5] == "\"http" and i[21] == "a":
holder_link = i
else:
pass
else:
pass
fixed_contract_links = contract_link[1:-5]
fixed_holder_links = holder_link[1:-5]
liquidity = (str(bnbholders)).split("=")
for j in liquidity:
if j[0:5] == "\"http":
bnbholder_link = j
else:
pass
fixed_bnbholder_link = bnbholder_link[1:-5]
crypto_name_links = fixed_name_links.splitlines()
crypto_name = fixed_name.splitlines()
crypto_contracts = fixed_contract_links.splitlines()
crypto_holders = fixed_holder_links.splitlines()
crypto_liquidity = fixed_bnbholder_link.splitlines()
for table_name in crypto_name:
for table_name_link in crypto_name_links:
for table_contract in crypto_contracts:
for table_holders in crypto_holders:
for table_liquidity in crypto_liquidity:
tuplehere = (counter, table_name, table_name_link, table_contract, table_holders, table_liquidity)
crypto_list.append(tuplehere)
print("Found",counter,": ",tuplehere[1])
counter += 1 #Storing data in a list
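# Heuristic rug-pull screen: for every token found above, scrape its top holders from BscScan
# and keep it only when the largest holders look like a burn address and/or the PancakeSwap pair.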
def check_rugpull(crypto_list,options,counter):
for tuples in crypto_list:
url = "https://bscscan.com/token/generic-tokenholders2?m=normal&a=" + str(tuples[3])
#optionrug.add_argument("--headless")
#optionrug.add_argument("--log-level=3")
rugdr = webdriver.Chrome(PATH, chrome_options=options)
rugdr.get(url)
sleep(4)
serialno = 0
rug_soup = BeautifulSoup(rugdr.page_source, 'html.parser')
rug_list = []
try:
itag = rug_soup.find_all(["i"], class_ = "far fa-file-alt text-secondary")
span = itag[0].parent
td = span.parent
tr = td.parent
one = tr.find("td")
onec = one.text
except:
itag = None
onec = 0
for row in rug_soup.select("tr:has(td)"):
tds = [td.get_text(strip=True) for td in row.select("td")]
fix = tds[1].splitlines()
for i in fix:
rug_list.append(i)
# guard against an empty holders list, which would otherwise leave first_address unbound
first_address = None
second_address = None
try:
    first_address = rug_list[0][0:6]
    second_address = rug_list[1][0:6]
except IndexError:
    pass
if ((first_address == "0x0000") or (first_address == "Burn A")) and (second_address == "Pancak"):
no_rugpulls.append(tuples)
elif (first_address == "Pancak") and ((second_address == "0x0000") or (second_address == "Burn A")):
no_rugpulls.append(tuples)
elif (first_address == "0x0000") or ((first_address == "Burn A") and (onec == "2")):
no_rugpulls.append(tuples)
elif ((first_address == "Pancak") or (first_address == "Burn A")) and (onec == "2"):
no_rugpulls.append(tuples)
elif (first_address == "Pancak"):
no_rugpulls.append(tuples)
else:
discarded.append(tuples)
print("Out of ",counter-1,", Non-Rugpulls are",len(no_rugpulls))
print("They are:")
for b in no_rugpulls:
print(" ",b[1])
def check_honeypot(no_rugpulls,options):
for data in no_rugpulls:
honey_dr = webdriver.Chrome(PATH, chrome_options=options)
honey_url = data[2]
honey_dr.get(honey_url)
sleep(7)
honey_soup = BeautifulSoup(honey_dr.page_source,'html.parser')
honey_dr.close()
tags = honey_soup.find_all(text="Holders")
pv2 = tags[0].parent
link = pv2['href']
bnblpaddress = link[26:-9]
bnburl = "https://bscscan.com/token/generic-tokenholders2?m=normal&a=" + bnblpaddress + "&p=1"
bnbdr = webdriver.Chrome(PATH)
bnbdr.get(bnburl)
sleep(4)
bnbsoup = BeautifulSoup(bnbdr.page_source,'html.parser')
bnbdr.close()
honeylist = []
try:
itag = bnbsoup.find_all(["i"], class_ = "far fa-file-alt text-secondary")
span = itag[0].parent
td = span.parent
tr = td.parent
one = tr.find("td")
onec = one.text
except:
itag = None
onec = 0
for rowsr in bnbsoup.select("tr:has(td)"):
tdsr = [tdr.get_text(strip=True) for tdr in rowsr.select("td")]
try:
fixr = tdsr[1].splitlines()
for r in fixr:
honeylist.append(r)
except (IndexError, NameError) as e:
pass
try:
firstbnbaddress = honeylist[0][0:6]
except IndexError:
firstbnbaddress = "0000"
if (firstbnbaddress == "0x0000") or (firstbnbaddress == "Burn A") or (firstbnbaddress[0:4] == "Legi"):
no_honeypots.append(data)
elif onec == "1":
no_honeypots.append(data)
else:
discarded.append(data)
print("Out of Total",counter-1,", I chose after checking for Rugpulls and Honeypots",len(no_honeypots))
print("They are:")
for n in no_honeypots:
print(" ",n[1])
def check_liquidity(no_honeypots,options):
for values in no_honeypots:
liq = webdriver.Chrome(executable_path=PATH, chrome_options=options)
liq.get(values[2])
sleep(5)
liq_soup = BeautifulSoup(liq.page_source,'html.parser')
try:
span_tag = liq_soup.find_all(["span"], class_ = "text-success")
liquid_value = span_tag[2].text
except:
liquid_value = "Null"
last_tuple = values + (liquid_value,)
print(last_tuple)
if __name__ == '__main__':
get_data(counter)
print()
print("Now Checking for rugpulls and Honeypots")
check_rugpull(crypto_list, options, counter)
check_honeypot(no_rugpulls,options)
check_liquidity(no_honeypots,options)
print("Printing")
directory = os.getcwd()
todaysdate = str(date.today())
path = "C:\\Users\\tusha\\Desktop\\python\\excel\\" + todaysdate
my_file = Path(path)
if my_file.is_file():
time = str(datetime.now().time())
fix_time = time.replace(':', '-')
wb = load_workbook("excel\\" + todaysdate + ".xlsx")
wb.create_sheet(title= fix_time, index=0)
sheet = wb.active
sheet['A1'] = "S.No."
sheet['B1'] = "Name"
sheet['C1'] = "Poocoin Link"
sheet['D1'] = "Contract"
sheet['E1'] = "Holders Link"
sheet['F1'] = "BNB Holders Link"
sheet.column_dimensions['B'].width = 30
sheet.column_dimensions['C'].width = 30
sheet.column_dimensions['D'].width = 25
sheet.column_dimensions['E'].width = 25
sheet.column_dimensions['F'].width = 25
for data in no_honeypots:
sheet.append(data)
for col in range(1,7):
sheet[get_column_letter(col) + '1'].font = Font(bold=True)
for data in discarded:
sheet.append(data)
for col in range(1,7):
for row in range(len(no_honeypots) + 2, len(no_honeypots) + len(discarded) + 2): # rows holding the discarded entries
sheet[get_column_letter(col) + str(row)].font = Font(bold=True, color= "E2320B")
sheet.insert_rows(len(no_honeypots)+2)
sheet.insert_rows(len(no_honeypots)+2)
wb.save(filename = "excel\\" + todaysdate + ".xlsx")
else:
time = str(datetime.now().time())
fix_time = time.replace(':', '-')
destinaton = "excel\\" + todaysdate + ".xlsx"
wb = Workbook()
wb.create_sheet(title= fix_time, index=0)
sheet = wb.active
sheet['A1'] = "S.No."
sheet['B1'] = "Name"
sheet['C1'] = "Poocoin Link"
sheet['D1'] = "Contract"
sheet['E1'] = "Holders Link"
sheet['F1'] = "BNB Holders Link"
sheet.column_dimensions['B'].width = 30
sheet.column_dimensions['C'].width = 30
sheet.column_dimensions['D'].width = 25
sheet.column_dimensions['E'].width = 25
sheet.column_dimensions['F'].width = 25
for data in no_honeypots:
sheet.append(data)
for col in range(1,7):
sheet[get_column_letter(col) + '1'].font = Font(bold=True)
for data in discarded:
sheet.append(data)
for col in range(1,7):
for row in range(len(no_honeypots) + 2, len(no_honeypots) + len(discarded) + 2): # rows holding the discarded entries
sheet[get_column_letter(col) + str(row)].font = Font(bold=True, color= "E2320B")
sheet.insert_rows(len(no_honeypots)+2)
sheet.insert_rows(len(no_honeypots)+2)
wb.save(filename = destinaton)
|
from flask import Flask
from flask import render_template
import os
import json
import time
import urllib2
app = Flask(__name__)
def get_weather():
url = "http://api.openweathermap.org/data/2.5/forecast/daily?q=London&cnt=10&mode=json&units=metric"
response = urllib2.urlopen(url).read()
return response
@app.route("/")
def index():
data = json.loads(get_weather())
day = time.strftime('%d %B', time.localtime(data.get('list')[0].get('dt')))
mini = data.get("list")[0].get("temp").get("min")
maxi = data.get("list")[0].get("temp").get("max")
description = data.get("list")[0].get("weather")[0].get("description")
return render_template("index.html", day=day, mini=mini, maxi=maxi, description=description)
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port, debug=True)
|
import numpy as np
from datetime import datetime, timedelta
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import sys
from tools_TC202010 import read_score
def main( top='', stime=datetime(2020, 9, 1, 0 ), etime=datetime(2020, 9, 1, 0) ):
time = stime
while time < etime:
data_ = read_score( top=top, time=time )
if time == stime:
# initiate a dictionary
data = dict( data_ )
data.update( {'time': [ stime, stime ] } )
else:
for key in data.keys():
if key == 'time':
data[key] = data[key] + [ time, time ]
else:
data[key] = data[key] + data_[key]
time += timedelta( hours=6 )
fig, ( ( ax1, ax2, ax3, ax4, ax5)) = plt.subplots( 5, 1, figsize=( 8, 9.5 ) )
ax_l = [ ax1, ax2, ax3, ax4, ax5]
tit_l = [ 'U', 'V', 'T', 'PS', 'Q' ]
ymax_l = [ 12, 12, 12, 10, 5 ]
ymin_l = [ -2, -2, -2, -2, -1 ]
for key in data.keys():
if ( 'RMSE_U' in key ) or ( 'BIAS_U' in key ):
ax = ax1
fac = 1.0
elif ( 'RMSE_V' in key ) or ( 'BIAS_V' in key ):
ax = ax2
fac = 1.0
elif ( 'RMSE_T' in key ) or ( 'BIAS_T' in key ):
ax = ax3
fac = 1.0
elif ( 'RMSE_PS' in key ) or ( 'BIAS_PS' in key ):
ax = ax4
fac = 1.e-2
elif ( 'RMSE_Q' in key ) or ( 'BIAS_Q' in key ):
ax = ax5
fac = 1.e3
else:
print( "skip ", key )
continue
if 'RMSE' in key:
ls = 'solid'
c = 'k'
elif 'BIAS' in key:
ls = 'dashed'
ls = 'solid'
c = 'b'
#ax.plot( data['time'], data[key], color=c, ls=ls )
ax.plot( data['time'], np.array( data[key] )*fac, color=c, ls=ls )
stime_ = stime - timedelta( hours=stime.hour )
etime_ = etime - timedelta( hours=etime.hour )
for i, ax in enumerate( ax_l ):
ax.text( 0.5, 0.99, tit_l[i],
fontsize=13, transform=ax.transAxes,
ha="center",
va='top',
)
ax.hlines( y=0.0, xmin=stime_, xmax=etime_, ls='dotted',
color='k', lw=1.0 )
ax.set_xlim( stime_, etime_ )
ax.set_ylim( ymin_l[i], ymax_l[i] )
if i == 4:
ax.xaxis.set_major_locator( mdates.HourLocator(interval=24) )
#ax.xaxis.set_major_formatter( mdates.DateFormatter('%d%H\n%m/%d') )
ax.xaxis.set_major_formatter( mdates.DateFormatter('%d') )
#ax.xaxis.set_major_formatter( mdates.DateFormatter('%m/%d') )
else:
ax.set_xticks([], [])
plt.show()
sys.exit()
# time = stime
stime = datetime( 2020, 8, 16, 6, 0 )
etime = datetime( 2020, 9, 2, 0, 0 )
top = "/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/scale-5.4.3/OUTPUT/TC2020/D1/D1_20210629"
stime = datetime( 2017, 6, 16, 6, 0 )
etime = datetime( 2017, 7, 5, 0, 0 )
top = "/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/scale-5.4.3/OUTPUT/KYUSHU2017_D1_20210629"
time = stime
main( top=top, stime=stime, etime=etime, )
|
from PIL import Image
from utils import http
from utils.endpoint import Endpoint, setup
from utils.perspective import convert_fit, skew
@setup
class SquidwardsTV(Endpoint):
def generate(self, kwargs):
image_url = kwargs['image']
base = Image.open(self.get_asset('squidwardstv.bmp'))
white = Image.new('RGBA', (base.width, base.height), 'white')
img = convert_fit(http.get_image(image_url), (800, 600))
img = skew(img, [(530, 107), (983, 278), (783, 611), (362, 434)])
white.paste(img, (0, 0), img)
white.paste(base, (0, 0), base)
white = white.convert('RGB')
return self.send_file(white, format='jpeg')
|
import time
import refresh
# def GetPageFloderNow(driver,k,p1,p2):
# b = 0
# while b == 0:
# try:
# time.sleep(1)
# page_floder_now = int(driver.find_element_by_xpath(
# "/html/body/div[1]/div/div[2]/div[2]/div[2]/div[3]/div[2]/div/ul").find_element_by_xpath(
# "li[@class='number active']").text)
# time.sleep(1)
# b = 1
# except:
# refresh.refreshFolder(driver,k,p1,p2)
#
# return page_floder_now
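# The helpers below scrape the folder/file tables: GetPageFolderMax / GetPageFileMax return the
# highest page number shown in the paginator, while GetFloderMax / GetFileMax count the
# 'report-list-item' rows on the current page, retrying (and refreshing the view) until the lookup succeeds.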
def GetPageFolderMax(driver):
b = 0
while b == 0:
try:
text = []
time.sleep(1)
sentry = driver.find_element_by_xpath(
"/html/body/div[1]/div/div[2]/div[2]/div[2]/div[3]/div[2]/div/ul").find_elements_by_xpath(
"li")
time.sleep(1)
sentry_1 = 1
for sentry_2 in sentry:
if sentry_2.text == '':
continue
text.insert(sentry_1, int(sentry_2.text))
page_folder_all = max(text)
b = 1
except:
pass
return int(page_folder_all)
def GetFloderMax(driver,k,p1,p2,FolderNow):
b = 0
while b == 0:
try:
FloderMax = 0
time.sleep(1)
sentry_1 = driver.find_element_by_xpath(
"/html/body/div[1]/div/div[2]/div[2]/div[2]/div[3]/div[1]"
).find_elements_by_class_name("report-list-item")
time.sleep(1)
for sentry_2 in sentry_1:
FloderMax = FloderMax + 1
b = 1
except:
refresh.refreshFolder(driver,k,p1,p2,FolderNow)
return FloderMax
# def GetPageFileNow(driver,k,p1,p2,p3,p4):
# b = 0
# while b == 0:
# try:
# time.sleep(1)
# page_file_now = int(driver.find_element_by_xpath(
# "/html/body/div[1]/div/div[2]/div[2]/div[2]/div[3]/div[2]/div/ul").find_element_by_xpath(
# "li[@class='number active']").text)
# b = 1
# except:
# refresh.refreshFile(driver,k,p1,p2,p3,p4)
#
# return int(page_file_now)
def GetFileMax(driver,k,p1,p2,p3,p4,FolderNow,FileNow):
b = 0
while b == 0:
try:
time.sleep(1)
FileMax = 0
sentry_1 = driver.find_element_by_xpath(
"/html/body/div[1]/div/div[2]/div[2]/div[2]/div[3]/div[1]"
).find_elements_by_xpath("./div[@class='report-list-item']")
for sentry_2 in sentry_1:
FileMax = FileMax + 1
b = 1
except:
refresh.refreshFile(driver,k,p1,p2,p3,p4,FolderNow,FileNow)
return FileMax
def GetPageFileMax(driver):
b = 0
while b == 0:
try:
time.sleep(1)
text = []
time.sleep(1)
sentry = driver.find_element_by_xpath(
"/html/body/div[1]/div/div[2]/div[2]/div[2]/div[3]/div[2]/div/ul").find_elements_by_xpath(
"li")
sentry_1 = 1
for sentry_2 in sentry:
if sentry_2.text == '':
continue
text.insert(sentry_1, int(sentry_2.text))
page_file_all = max(text)
b = 1
except:
pass
return page_file_all
|
from collections import defaultdict
import os
import time
import random
import torch
import torch.nn as nn
#import model_lstm as mn
import model as mn
import numpy as np
import argparse
from vocab import Vocab
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--train", type=str, default="data/sst-train.txt")
parser.add_argument("--dev", type=str, default="data/sst-dev.txt")
parser.add_argument("--test", type=str, default="data/sst-test.txt")
parser.add_argument("--emb_file", type=str, default="glove.6B.300d.txt")
parser.add_argument("--emb_size", type=int, default=300)
parser.add_argument("--hid_size", type=int, default=300)
parser.add_argument("--hid_layer", type=int, default=3)
parser.add_argument("--word_drop", type=float, default=0)
parser.add_argument("--emb_drop", type=float, default=0)
parser.add_argument("--hid_drop", type=float, default=0.333)
parser.add_argument("--pooling_method", type=str, default="avg", choices=["sum", "avg", "max"])
parser.add_argument("--grad_clip", type=float, default=5)
parser.add_argument("--max_train_epoch", type=int, default=10)
parser.add_argument("--batch_size", type=int, default=16)
parser.add_argument("--lrate", type=float, default=0.005)
parser.add_argument("--lrate_decay", type=float, default=0) # 0 means no decay!
parser.add_argument("--mrate", type=float, default=0.85)
parser.add_argument("--log_niter", type=int, default=100)
parser.add_argument("--eval_niter", type=int, default=500)
parser.add_argument("--model", type=str, default="model.pt") # save/load model name
parser.add_argument("--dev_output", type=str, default="output.dev.txt") # output for dev
parser.add_argument("--test_output", type=str, default="output.test.txt") # output for dev
parser.add_argument("--architecture", type=str, default="DAN") # output for dev
args = parser.parse_args()
print(f"RUN: {vars(args)}")
return args
def read_dataset(filename):
dataset = []
with open(filename, "r", encoding="utf-8") as f:
for line in f:
tag, words = line.lower().strip().split(" ||| ")
dataset.append((words.split(' '), tag))
return dataset
def convert_text_to_ids(dataset, word_vocab, tag_vocab):
data = []
for words, tag in dataset:
word_ids = [word_vocab[w] for w in words]
data.append((word_ids, tag_vocab[tag]))
return data
def data_iter(data, batch_size, shuffle=True):
"""
Randomly shuffle training data, and partition into batches.
Each mini-batch may contain sentences with different lengths.
"""
if shuffle:
# Shuffle training data.
np.random.shuffle(data)
batch_num = int(np.ceil(len(data) / float(batch_size)))
for i in range(batch_num):
cur_batch_size = batch_size if i < batch_num - 1 else len(data) - batch_size * i
sents = [data[i * batch_size + b][0] for b in range(cur_batch_size)]
tags = [data[i * batch_size + b][1] for b in range(cur_batch_size)]
yield sents, tags
def pad_sentences(sents, pad_id):
"""
    Append pad_id to the sentences in a mini-batch so that all
    sentences in the mini-batch have the same length (padding is done in place).
Args:
sents: list(list(int)), a list of a list of word ids
pad_id: the word id of the "<pad>" token
Return:
aug_sents: list(list(int)), |s_1| == |s_i|, for s_i in sents
"""
max_seq_length = 0
for sentence in sents:
if len(sentence)>max_seq_length:
max_seq_length = len(sentence)
for i in range(len(sents)):
if len(sents[i])<max_seq_length:
sents[i]+=[pad_id]*(max_seq_length-len(sents[i]))
#print(sents[i])
return sents
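# Illustrative behaviour of pad_sentences (hypothetical word ids, not part of the pipeline):
#   pad_sentences([[5, 6, 7], [8]], pad_id=0) -> [[5, 6, 7], [8, 0, 0]]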
def compute_grad_norm(model, norm_type=2):
"""
    Compute the gradients' L2 norm
"""
total_norm = 0.0
for name, p in model.named_parameters():
if p.grad is None:
continue
p_norm = p.grad.norm(norm_type) ** (norm_type)
total_norm += p_norm
return total_norm ** (1. / norm_type)
def compute_param_norm(model, norm_type=2):
"""
    Compute the model's parameters' L2 norm
"""
total_norm = 0.0
for p in model.parameters():
p_norm = p.norm(norm_type) ** (norm_type)
total_norm += p_norm
return total_norm ** (1. / norm_type)
def evaluate(dataset, model, device, tag_vocab=None, filename=None):
"""
Evaluate test/dev set
"""
model.eval()
predicts = []
acc = 0
for words, tag in dataset:
X = torch.LongTensor([words]).to(device)
scores = model(X)
y_pred = scores.argmax(1)[0].item()
predicts.append(y_pred)
acc += int(y_pred == tag)
print(f' -Accuracy: {acc/len(predicts):.4f} ({acc}/{len(predicts)})')
if filename:
with open(filename, 'w') as f:
for y_pred in predicts:
# convert tag_id to its original label
tag = tag_vocab.id2word[y_pred]
f.write(f'{tag}\n')
print(f' -Save predictions to {filename}')
model.train()
return acc/len(predicts)
def main():
args = get_args()
    _seed = int(os.environ.get("MINNN_SEED", 12341))  # env values are strings; np.random.seed needs an int
random.seed(_seed)
np.random.seed(_seed)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Read datasets
train_text = read_dataset(args.train)
dev_text = read_dataset(args.dev)
test_text = read_dataset(args.test)
# Build vocabularies for words and tags from training data
word_vocab = Vocab(pad=True, unk=True)
word_vocab.build(list(zip(*train_text))[0])
tag_vocab = Vocab()
tag_vocab.build(list(zip(*train_text))[1])
# Convert word string to word ids
train_data = convert_text_to_ids(train_text, word_vocab, tag_vocab)
dev_data = convert_text_to_ids(dev_text, word_vocab, tag_vocab)
test_data = convert_text_to_ids(test_text, word_vocab, tag_vocab)
# Create a model
nwords = len(word_vocab)
ntags = len(tag_vocab)
print('nwords', nwords, 'ntags', ntags)
if args.architecture=="LSTM":
model = mn.LSTMModel(args, word_vocab, len(tag_vocab)).to(device)
else:
model = mn.DanModel(args, word_vocab, len(tag_vocab)).to(device)
loss_func = nn.CrossEntropyLoss()
optimizer = torch.optim.Adagrad(model.parameters(), lr=args.lrate, weight_decay=1e-5,lr_decay=args.lrate_decay)
#optimizer = torch.optim.AdamW(model.parameters(), lr=0.001,weight_decay=1e-5)
#print(model.summary())
# Training
start_time = time.time()
train_iter = 0
train_loss = train_example = train_correct = 0
best_records = (0, 0) # [best_iter, best_accuracy]
model.train()
for epoch in range(args.max_train_epoch):
for batch in data_iter(train_data, batch_size=args.batch_size, shuffle=True):
train_iter += 1
model.train()
X = pad_sentences(batch[0], word_vocab['<pad>'])
X = torch.LongTensor(X).to(device)
Y = torch.LongTensor(batch[1]).to(device)
#print("X=",X)
#print("Y=",Y)
#sys.exit()
# Forward pass: compute the unnormalized scores for P(Y|X)
scores = model(X)
loss = loss_func(scores, Y)
# Backpropagation: compute gradients for all parameters
optimizer.zero_grad()
loss.backward()
if args.grad_clip > 0:
grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
# Update model's parameters with gradients
optimizer.step()
train_loss += loss.item() * len(batch[0])
train_example += len(batch[0])
Y_pred = scores.argmax(1)
train_correct += (Y_pred == Y).sum().item()
if train_iter % args.log_niter == 0:
gnorm = compute_grad_norm(model)
pnorm = compute_param_norm(model)
print(f'Epoch {epoch}, iter {train_iter}, train set: '\
f'loss={train_loss/train_example:.4f}, '\
f'accuracy={train_correct/train_example:.2f} ({train_correct}/{train_example}), '\
f'gradient_norm={gnorm:.2f}, params_norm={pnorm:.2f}, '\
f'time={time.time()-start_time:.2f}s')
train_loss = train_example = train_correct = 0
if train_iter % args.eval_niter == 0:
print(f'Evaluate dev data:')
dev_accuracy = evaluate(dev_data, model, device)
if dev_accuracy > best_records[1]:
print(f' -Update best model at {train_iter}, dev accuracy={dev_accuracy:.4f}')
best_records = (train_iter, dev_accuracy)
model.save(args.model)
# Load the best model
model.load(args.model)
evaluate(test_data, model, device, tag_vocab, filename=args.test_output)
evaluate(dev_data, model, device, tag_vocab, filename=args.dev_output)
if __name__ == '__main__':
main()
|
import coloredlogs
from colorama import Fore, Style
from datetime import datetime, timezone
import logging
import verboselogs
import getpass
import json
import os
import praw
from pprint import pprint
import re
from saveddit.submission_downloader import SubmissionDownloader
from saveddit.subreddit_downloader import SubredditDownloader
from saveddit.search_config import SearchConfig
import sys
from tqdm import tqdm
class SearchSubreddits:
config = SubredditDownloader.config
REDDIT_CLIENT_ID = config['reddit_client_id']
REDDIT_CLIENT_SECRET = config['reddit_client_secret']
IMGUR_CLIENT_ID = config['imgur_client_id']
REDDIT_USERNAME = None
try:
REDDIT_USERNAME = config['reddit_username']
except Exception as e:
pass
REDDIT_PASSWORD = None
if REDDIT_USERNAME:
if sys.stdin.isatty():
print("Username: " + REDDIT_USERNAME)
REDDIT_PASSWORD = getpass.getpass("Password: ")
else:
# echo "foobar" > password
# saveddit user .... < password
REDDIT_PASSWORD = sys.stdin.readline().rstrip()
def __init__(self, subreddit_names):
self.logger = verboselogs.VerboseLogger(__name__)
level_styles = {
'critical': {'bold': True, 'color': 'red'},
'debug': {'color': 'green'},
'error': {'color': 'red'},
'info': {'color': 'white'},
'notice': {'color': 'magenta'},
'spam': {'color': 'white', 'faint': True},
'success': {'bold': True, 'color': 'green'},
'verbose': {'color': 'blue'},
'warning': {'color': 'yellow'}
}
coloredlogs.install(level='SPAM', logger=self.logger,
fmt='%(message)s', level_styles=level_styles)
if not SearchSubreddits.REDDIT_USERNAME:
self.logger.error("`reddit_username` in user_config.yaml is empty")
self.logger.error("If you plan on using the user API of saveddit, then add your username to user_config.yaml")
print("Exiting now")
exit()
else:
            if not SearchSubreddits.REDDIT_PASSWORD:
                if sys.stdin.isatty():
                    print("Username: " + SearchSubreddits.REDDIT_USERNAME)
                    SearchSubreddits.REDDIT_PASSWORD = getpass.getpass("Password: ")
                else:
                    # echo "foobar" > password
                    # saveddit user .... < password
                    SearchSubreddits.REDDIT_PASSWORD = sys.stdin.readline().rstrip()
self.reddit = praw.Reddit(
client_id=SearchSubreddits.REDDIT_CLIENT_ID,
client_secret=SearchSubreddits.REDDIT_CLIENT_SECRET,
user_agent="saveddit (by /u/p_ranav)"
)
self.multireddit_name = "+".join(subreddit_names)
self.subreddit = self.reddit.subreddit(self.multireddit_name)
def download(self, args):
output_path = args.o
query = args.q
sort = args.s
syntax = SearchConfig.DEFAULT_SYNTAX
time_filter = args.t
include_nsfw = args.include_nsfw
skip_comments = args.skip_comments
skip_videos = args.skip_videos
skip_meta = args.skip_meta
comment_limit = 0 # top-level comments ONLY
self.logger.verbose("Searching '" + query + "' in " + self.multireddit_name + ", sorted by " + sort)
if include_nsfw:
self.logger.spam(" * Including NSFW results")
        search_dir = os.path.join(
            output_path, "www.reddit.com", "q", query, self.multireddit_name, sort)
if not os.path.exists(search_dir):
os.makedirs(search_dir)
search_results = None
if include_nsfw:
search_params = {"include_over_18": "on"}
search_results = self.subreddit.search(query, sort, syntax, time_filter, params=search_params)
else:
search_results = self.subreddit.search(query, sort, syntax, time_filter)
results_found = False
for i, submission in enumerate(search_results):
if not results_found:
results_found = True
SubmissionDownloader(submission, i, self.logger, search_dir,
skip_videos, skip_meta, skip_comments, comment_limit,
{'imgur_client_id': SubredditDownloader.IMGUR_CLIENT_ID})
if not results_found:
self.logger.spam(" * No results found")
|
dias = int(input('days rented: '))
km = float(input('number of km driven: '))
diasn = dias * 60     # R$ 60.00 per rental day
kmm = km * 0.15       # R$ 0.15 per km driven
print('a car rented for {} days and driven {} km must pay {:.2f} Reais'.format(dias, km, diasn + kmm))
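# Worked example (illustrative): 5 days and 100 km -> 5 * 60 + 100 * 0.15 = 315.00 Reais.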
|
from django.apps import AppConfig
class CompassWebsiteAppConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "compass_website_app"
|
import copy
import fsaexporter.utils as utils
from fsaexporter.ds.page import DeclarationPage
BASE_DECLARATION_PAYLOAD = {"size": 100, "page": 0,
"filter": {"status": [], "idDeclType": [], "idCertObjectType": [], "idProductType": [],
"idGroupRU": [], "idGroupEEU": [], "idTechReg": [], "idApplicantType": [],
"regDate": {"minDate": None, "maxDate": None},
"endDate": {"minDate": None, "maxDate": None}, "columnsSearch": [
{"name": "number", "search": "", "type": 0, "translated": False}],
"idProductOrigin": [], "idProductEEU": [], "idProductRU": [], "idDeclScheme": [],
"awaitForApprove": None, "awaitOperatorCheck": None, "editApp": None,
"violationSendDate": None},
"columnsSort": [{"column": "declDate", "sort": "DESC"}]}
class DeclarationDownloader:
def __init__(self, dec_name="", reg_min_date=None, reg_max_date=None, end_min_date=None, end_max_date=None):
"""
        :param dec_name: Declaration of conformity number
        :param reg_min_date: Earliest declaration registration date
        :param reg_max_date: Latest declaration registration date
        :param end_min_date: Earliest declaration expiry date
        :param end_max_date: Latest declaration expiry date
"""
self.client = utils.FsaDownloader()
        self.payload = copy.deepcopy(BASE_DECLARATION_PAYLOAD)  # deep copy so the shared template filter is not mutated
self.payload["filter"]["columnsSearch"][0]["search"] = dec_name
self.payload["filter"]["regDate"]["minDate"] = utils.datetime_to_fsa(reg_min_date)
self.payload["filter"]["regDate"]["maxDate"] = utils.datetime_to_fsa(reg_max_date)
self.payload["filter"]["endDate"]["minDate"] = utils.datetime_to_fsa(end_min_date)
self.payload["filter"]["endDate"]["minDate"] = utils.datetime_to_fsa(end_max_date)
self.current_page = 0
self.current_declaration = 0
self.next_page = DeclarationPage.init(self.payload, self.current_page, self.client)
self.p = None
def __aiter__(self):
return self
async def __anext__(self):
        if self.p is None:
            self.p: DeclarationPage = await self.next_page
            self.current_page += 1
            self.next_page = DeclarationPage.init(self.payload, self.current_page, self.client)
if self.p.empty():
raise StopAsyncIteration
if self.current_declaration >= len(self.p.declarations):
self.p: DeclarationPage = await self.next_page
self.current_page += 1
self.current_declaration = 0
self.next_page = DeclarationPage.init(self.payload, self.current_page, self.client)
self.current_declaration += 1
return self.p.declarations[self.current_declaration - 1]
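# Minimal usage sketch (asyncio entry point assumed; the filter argument is illustrative):
#
#   import asyncio
#   from datetime import datetime
#
#   async def dump_declarations():
#       async for declaration in DeclarationDownloader(reg_min_date=datetime(2021, 1, 1)):
#           print(declaration)
#
#   asyncio.run(dump_declarations())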
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from bowtie import cache
from bowtie.control import DropDown, Slider
from bowtie.control import Button, Switch, Number
from bowtie.visual import Plotly
import numpy as np
import pandas as pd
import plotlywrapper as pw
from sklearn import manifold
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import MinMaxScaler
iris = pd.read_csv('./iris.csv')
iris = iris.drop(iris.columns[0], axis=1)
attrs = iris.columns[:-1]
species = iris.Species.unique()
algos = ['MDS', 'TSNE', 'Locally Linear Embedding']
algo_select = DropDown(caption='Manifold Algorithm', labels=algos, values=algos)
species_select = DropDown(caption='Species', labels=species, values=species, multi=True)
normalize_switch = Switch(caption='Normalize')
random_seed = Number(caption='Seed', start=0, minimum=0, step=1)
neighbor_slider = Slider(caption='Neighbors', start=1, minimum=1, maximum=20, step=1)
perplex_slider = Slider(caption='Perplexity', start=30, minimum=1, maximum=200, step=1)
replot_button = Button(label='Replot')
anomplot = Plotly()
attrplot = Plotly()
def get_species_data(species, normalize):
species = [s['value'] for s in species]
data = iris.query('Species in @species').iloc[:, :-1]
if normalize:
mms = MinMaxScaler()
data.iloc[:] = mms.fit_transform(data)
return data
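# Illustrative call (the species argument mimics the DropDown payload shape used below):
#   get_species_data([{'value': 'setosa'}, {'value': 'versicolor'}], normalize=True)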
def baseviz(algo, normalize, neighbors, species, perplex):
# attr_data = get_species_data(species, normalize).iloc[:, :4]
if (algo is None or
normalize is None or
species is None):
return
baseviz2(algo, normalize, neighbors, species, perplex)
def replot():
algo = algo_select.get()
neighbors = neighbor_slider.get()
normalize = normalize_switch.get()
species = species_select.get()
perplex = perplex_slider.get()
baseviz2(algo, normalize, neighbors, species, perplex)
def baseviz2(algo, normalize, neighbors, species, perplex):
algo = algo['label']
attr_data = get_species_data(species, normalize)
anomplot.progress.do_percent(0)
anomplot.progress.do_visible(True)
seed = random_seed.get()
if algo == 'TSNE':
mnf = manifold.TSNE(random_state=seed, perplexity=perplex)
elif algo == 'MDS':
mnf = manifold.MDS(random_state=seed)
elif algo == 'Locally Linear Embedding':
mnf = manifold.LocallyLinearEmbedding(random_state=seed)
    else:
        print(algo)
        return
anomplot.progress.do_inc(2)
reduced = mnf.fit_transform(attr_data)
anomplot.progress.do_inc(5)
    nn = NearestNeighbors(n_neighbors=neighbors)
nn.fit(reduced)
anomplot.progress.do_inc(3)
dists = nn.kneighbors()[0][:, -1]
msize = 60 * dists / max(dists)
chart = pw.Chart()
for i, (x, y) in enumerate(reduced):
chart += pw.scatter(x, y, markersize=msize[i], color='blue', opacity=0.5)
anomplot.progress.do_inc(1)
chart.legend(False)
chart.layout['hovermode'] = 'closest'
anomplot.progress.do_visible(False)
cache.save('anomaly', chart.dict)
anomplot.do_all(chart.dict)
chart = attr_data.T.plotly.line(opacity=0.5)
chart.legend(False)
chart.layout['hovermode'] = 'closest'
attrplot.do_all(chart.dict)
def anom_click_point(point):
nth = point['curve']
normalize = normalize_switch.get()
species = species_select.get()
attr_data = get_species_data(species, normalize)
pix = attr_data.index[nth]
chart = pw.Chart()
for i, (idx, row) in enumerate(attr_data.iterrows()):
if i == nth:
chart += row.plotly.line(width=10)
else:
chart += row.plotly.line(opacity=0.5)
chart.legend(False)
chart.layout['hovermode'] = 'closest'
attrplot.do_all(chart.dict)
def anom_select_points(points):
print(points)
def attr_click_point(point):
nth = point['curve']
algo = algo_select.get()
neighbors = neighbor_slider.get()
normalize = normalize_switch.get()
species = species_select.get()
perplex = perplex_slider.get()
data = cache.load('anomaly')
chart = pw.Chart(data=data['data'], layout=data['layout'])
chart.data[nth]['line']['color'] = 'red'
chart.data[nth]['opacity'] = 1
anomplot.do_all(chart.dict)
def attr_select_points(points):
print(points)
from bowtie import command
@command
def construct():
from bowtie import Layout
description = """
Iris Anomalies
==============
"""
layout = Layout(description=description, title='Iris Anomaly', background_color='LavenderBlush', debug=False)
layout.add_visual(anomplot)
layout.add_visual(attrplot, next_row=True)
layout.add_controller(algo_select)
layout.add_controller(species_select)
layout.add_controller(normalize_switch)
layout.add_controller(random_seed)
layout.add_controller(neighbor_slider)
layout.add_controller(perplex_slider)
layout.add_controller(replot_button)
layout.subscribe(baseviz,
algo_select.on_change,
normalize_switch.on_switch,
neighbor_slider.on_change,
species_select.on_change,
perplex_slider.on_change)
layout.subscribe(replot, replot_button.on_click)
layout.subscribe(anom_click_point, anomplot.on_click)
layout.subscribe(anom_click_point, anomplot.on_hover)
layout.subscribe(anom_select_points, anomplot.on_select)
layout.subscribe(attr_click_point, attrplot.on_click)
layout.subscribe(attr_click_point, attrplot.on_hover)
layout.subscribe(attr_select_points, attrplot.on_select)
layout.build()
|
# limits.py
import numpy as np
class PTLimits(object):
"""The Pressure Transducer Limits determine when 'Safe' mode breaks.
Attributes
----------
pt : pressure transducers [8]
a : pressure transducer PT-PR-110
b : pressure transducer PT-OX-120
c : pressure transducer PT-FU-130
d : pressure transducer PT-OX-210
e : pressure transducer PT-FU-310
f : pressure transducer PT-OX-220
g : pressure transducer PT-FU-320
h : pressure transducer PT-CC-410
"""
def __init__(self, pt_a=3000, pt_b=800, pt_c=800, pt_d=800, pt_e=800, pt_f=750, pt_g=750, pt_h=900):
self._pt_a = pt_a
self._pt_b = pt_b
self._pt_c = pt_c
self._pt_d = pt_d
self._pt_e = pt_e
self._pt_f = pt_f
self._pt_g = pt_g
self._pt_h = pt_h
self._pt_limits = [pt_a, pt_b, pt_c, pt_d, pt_e, pt_f, pt_g, pt_h] #Pressure Transducers
@property
def pt_limits(self):
self._pt_limits = [self._pt_a, self._pt_b, self._pt_c, self._pt_d, self._pt_e, self._pt_f, self._pt_g, self._pt_h]
return self._pt_limits
@property
def pt_a(self):
return self._pt_a #Pressure Transducer a PT-PR-110
@property
def pt_b(self):
return self._pt_b #Pressure Transducer b PT-OX-120
@property
def pt_c(self):
return self._pt_c #Pressure Transducer c PT-FU-130
@property
def pt_d(self):
return self._pt_d #Pressure Transducer d PT-OX-210
@property
def pt_e(self):
return self._pt_e #Pressure Transducer e PT-FU-310
@property
def pt_f(self):
return self._pt_f #Pressure Transducer f PT-OX-220
@property
def pt_g(self):
return self._pt_g #Pressure Transducer g PT-FU-320
@property
def pt_h(self):
return self._pt_h #Pressure Transducer h PT-CC-410
@pt_a.setter
def pt_a(self, pt_a):
self._pt_a = pt_a #Set Pressure Transducer a PT-PR-110 Limit
@pt_b.setter
def pt_b(self, pt_b):
self._pt_b = pt_b #Set Pressure Transducer b PT-OX-120 Limit
@pt_c.setter
def pt_c(self, pt_c):
self._pt_c = pt_c #Set Pressure Transducer c PT-FU-130 Limit
@pt_d.setter
def pt_d(self, pt_d):
self._pt_d = pt_d #Set Pressure Transducer d PT-OX-210 Limit
@pt_e.setter
def pt_e(self, pt_e):
self._pt_e = pt_e #Set Pressure Transducer e PT-FU-310 Limit
@pt_f.setter
def pt_f(self, pt_f):
self._pt_f = pt_f #Set Pressure Transducer f PT-OX-220 Limit
@pt_g.setter
def pt_g(self, pt_g):
self._pt_g = pt_g #Set Pressure Transducer g PT-FU-320 Limit
@pt_h.setter
def pt_h(self, pt_h):
        self._pt_h = pt_h #Set Pressure Transducer h PT-CC-410 Limit
class TCLimits(object):
"""The Thermocouple Limits determine when 'Safe' mode breaks.
Attributes
----------
t : thermocouple [12]
a : thermocouple T-OX-210
b : thermocouple T-FU-310
c : thermocouple T-OX-220
d : thermocouple T-OX-230
e : thermocouple T-OX-240
f : thermocouple T-OX-250
g : thermocouple T-FU-320
h : thermocouple T-OX-260
i : thermocouple T-OX-270
j : thermocouple T-CC-410
k : thermocouple T-CC-420
l : thermocouple T-CC-430
"""
def __init__(self, tc_a=83, tc_b=303, tc_c=73, tc_d=73, tc_e=73, tc_f=73, tc_g=303, tc_h=73, tc_i=73, tc_j=573, tc_k=573, tc_l=573):
self._tc_a = tc_a
self._tc_b = tc_b
self._tc_c = tc_c
self._tc_d = tc_d
self._tc_e = tc_e
self._tc_f = tc_f
self._tc_g = tc_g
self._tc_h = tc_h
self._tc_i = tc_i
self._tc_j = tc_j
self._tc_k = tc_k
self._tc_l = tc_l
self._tc_limits = [tc_a, tc_b, tc_c, tc_d, tc_e, tc_f, tc_g, tc_h, tc_i, tc_j, tc_k, tc_l] #Thermocouple
@property
def tc_limits(self):
self._tc_limits = [self._tc_a, self._tc_b, self._tc_c, self._tc_d, self._tc_e, self._tc_f, self._tc_g, self._tc_h, self._tc_i, self._tc_j, self._tc_k, self._tc_l]
return self._tc_limits
@property
def tc_a(self):
return self._tc_a #Thermocouple a T-OX-210
@property
def tc_b(self):
return self._tc_b #Thermocouple b T-FU-310
@property
def tc_c(self):
return self._tc_c #Thermocouple c T-OX-220
@property
def tc_d(self):
return self._tc_d #Thermocouple d T-OX-230
@property
def tc_e(self):
return self._tc_e #Thermocouple e T-OX-240
@property
def tc_f(self):
return self._tc_f #Thermocouple f T-OX-250
@property
def tc_g(self):
return self._tc_g #Thermocouple g T-FU-320
@property
def tc_h(self):
return self._tc_h #Thermocouple h T-OX-260
@property
def tc_i(self):
return self._tc_i #Thermocouple i T-OX-270
@property
def tc_j(self):
return self._tc_j #Thermocouple j T-CC-410
@property
def tc_k(self):
return self._tc_k #Thermocouple k T-CC-420
@property
def tc_l(self):
return self._tc_l #Thermocouple l T-CC-430
@tc_a.setter
def tc_a(self, tc_a):
self._tc_a = tc_a #Set Thermocouple a T-OX-210 Limit
@tc_b.setter
def tc_b(self, tc_b):
self._tc_b = tc_b #Set Thermocouple b T-FU-310 Limit
@tc_c.setter
def tc_c(self,tc_c):
self._tc_c = tc_c #Set Thermocouple c T-OX-220 Limit
@tc_d.setter
def tc_d(self, tc_d):
self._tc_d = tc_d #Set Thermocouple d T-OX-230 Limit
@tc_e.setter
def tc_e(self, tc_e):
self._tc_e = tc_e #Set Thermocouple e T-OX-240 Limit
@tc_f.setter
def tc_f(self, tc_f):
self._tc_f = tc_f #Set Thermocouple f T-OX-250 Limit
@tc_g.setter
def tc_g(self, tc_g):
self._tc_g = tc_g #Set Thermocouple g T-FU-320 Limit
@tc_h.setter
def tc_h(self,tc_h):
self._tc_h = tc_h #Set Thermocouple h T-OX-260 Limit
@tc_i.setter
def tc_i(self, tc_i):
self._tc_i = tc_i #Set Thermocouple i T-OX-270 Limit
@tc_j.setter
def tc_j(self, tc_j):
self._tc_j = tc_j #Set Thermocouple j T-CC-410 Limit
@tc_k.setter
def tc_k(self, tc_k):
self._tc_k = tc_k #Set Thermocouple k T-CC-420 Limit
@tc_l.setter
def tc_l(self, tc_l):
self._tc_l = tc_l #Set Thermocouple l T-CC-430 Limit
class LCLimits(object):
"""The Load Cell Limits determine when 'Safe' mode breaks and propellant tank levels.
Attributes
----------
lc : load cell [3]
a : load cell LC-OX-210
b : load cell LC-FU-310
c : load cell LC-CC-410
"""
def __init__(self, lc_a=600, lc_b=600, lc_c=3000):
self._lc_a = lc_a
self._lc_b = lc_b
self._lc_c = lc_c
self._lc_limits = [lc_a, lc_b, lc_c] #Load Cells
@property
def lc_limits(self):
self._lc_limits = [self._lc_a, self._lc_b, self._lc_c]
return self._lc_limits
@property
def lc_a(self):
return self._lc_a #Load Cell a LC-OX-210
@property
def lc_b(self):
return self._lc_b #Load Cell b LC-FU-310
@property
def lc_c(self):
return self._lc_c #Load Cell c LC-CC-410
@lc_a.setter
def lc_a(self, lc_a):
self._lc_a = lc_a #Set Load Cell a LC-OX-210 Limit
@lc_b.setter
def lc_b(self, lc_b):
        self._lc_b = lc_b #Set Load Cell b LC-FU-310 Limit
@lc_c.setter
def lc_c(self,lc_c):
        self._lc_c = lc_c #Set Load Cell c LC-CC-410 Limit
class PTData(object):
"""The Pressure Transducer Data collected.
Attributes
----------
pt_data : pressure transducer data array [8]
"""
    def __init__(self, pt_data=None):
        # Avoid a shared mutable default argument
        self._pt_data = pt_data if pt_data is not None else [0] * 8 #Pressure Transducers
@property
def pt_data(self):
return self._pt_data
@pt_data.setter
def pt_data(self, pt_data):
self._pt_data = pt_data
class TCData(object):
"""The Thermocouple Data collected.
Attributes
----------
tc_data : thermocouple data array [12]
"""
    def __init__(self, tc_data=None):
        # Avoid a shared mutable default argument
        self._tc_data = tc_data if tc_data is not None else [0] * 12 #Thermocouple
@property
def tc_data(self):
return self._tc_data
@tc_data.setter
def tc_data(self, tc_data):
self._tc_data = tc_data
class LCData(object):
"""The Load Cell data collected.
Attributes
----------
lc_data : load cell data array [3]
"""
    def __init__(self, lc_data=None):
        # Avoid a shared mutable default argument
        self._lc_data = lc_data if lc_data is not None else [0] * 3 #Load Cells
@property
def lc_data(self):
return self._lc_data
@lc_data.setter
def lc_data(self, lc_data):
self._lc_data = lc_data
class LSData(object):
"""The State of a Limit Switch determines if a valve is 'opened' or 'closed'.
Attributes
----------
ls_state : limit switch data array [11]
"""
    def __init__(self, ls_state=None):
        # Avoid a shared mutable default argument, and store the list itself (not a nested list)
        self._ls_state = ls_state if ls_state is not None else [0] * 11
@property
def ls_state(self):
return self._ls_state
@ls_state.setter
def ls_state(self, ls_data):
self._ls_state = ls_data
if __name__ == '__main__':
'''
lcl = LCLimits()
print(lcl.lc_limits)
print(lcl.lc_a)
lcl.lc_a = 1
print(lcl.lc_a)
print(lcl.lc_limits)
tcl = TCLimits()
print(tcl.tc_limits)
print(tcl.tc_a)
tcl.tc_a = 1
print(tcl.tc_a)
print(tcl.tc_limits)
ptl = PTLimits()
print(ptl.pt_limits)
print(ptl.pt_c)
ptl.pt_c = 1
print(ptl.pt_c)
print(ptl.pt_limits)
'''
|
"""Pryvate blueprints."""
|
def get_first_name():
return "Fred"
def get_last_name():
return "McFredface"
def get_full_name():
return "Fred McFredface"
|
"""Sort of like numpy.dtype, but with support for torch and python types.
Flexible/Shaped/Structured types, like `('void', 10)`, `('int', [2, 3])`,
`[('field1', type1), ('field2', type2)]`, etc. are not accepted.
Only basic types are.
For the generic types 'int' and 'float', we follow numpy's and python's
convention and map them to 'int64' and 'float64'. This conflicts with
torch's and C's convention, where they are mapped to 'int32' and 'float32'.
- To generate a dtype object: `dtype(dtype_like)`
- To upcast dtypes: `upcast(a, b)`
- To convert dtypes: `as_torch(dtype, upcast=True)`,
`as_numpy(...)`, `as_python(...)`.
- Other utilities: `same_kind(a, b)`, `equivalent(a, b)`
"""
import abc
import sys
import numbers
import torch as _torch
from .optionals import numpy as np
concrete_types = []
# ----------------------------------------------------------------------
# BYTE ORDER
# ----------------------------------------------------------------------
class ByteOrderType(abc.ABCMeta):
is_native: bool = property(lambda cls: False)
is_little: bool = property(lambda cls: False)
is_big: bool = property(lambda cls: False)
str: str = property(lambda cls: '')
char = property(lambda cls: '')
def __str__(self):
return "byteorder('{}')".format(self.char)
__repr__ = __str__
def __eq__(self, other):
return self is byteorder(other)
class byteorder(abc.ABC, metaclass=ByteOrderType):
"""Base class for byte orders.
Objects generated by the constructor of this class are singletons.
"""
def __new__(cls, order):
if isinstance(order, str):
if order.lower() in ('>', 'big'):
return bigendian
elif order.lower() in ('<', 'little'):
return littleendian
elif order in ('=', '==', 'native'):
if sys.byteorder == 'little':
return littleendian
else:
return bigendian
elif order.lower() in ('|', 'none'):
return noendian
else:
raise ValueError('Unknown byte order {}'.format(order))
elif isinstance(order, type) and issubclass(order, byteorder):
return order
elif isinstance(order, type) and issubclass(order, dtype):
return order.byteorder
else:
raise TypeError('{} cannot be interpreted as a byte order'
.format(order))
def __init__(self, order):
"""
Parameters
----------
order : ByteOrderType or str
Possible string values are:
- {'>', 'big'} for big endian
- {'<', 'little'} for little endian
- {'=', 'native'} for native endianness
- {'|', 'none'} for no endianness
"""
# this is just used for documentation
pass
class LittleEndianType(ByteOrderType):
is_native = property(lambda cls: sys.byteorder == 'little')
is_little = property(lambda cls: True)
is_big = property(lambda cls: False)
str = property(lambda cls: 'little')
char = property(lambda cls: '<')
class littleendian(byteorder, metaclass=LittleEndianType):
"""Little endian
Bytes are ordered from the least significant to the most significant.
"""
def __new__(cls):
return littleendian
class BigEndianType(ByteOrderType):
is_native = property(lambda cls: sys.byteorder == 'big')
is_little = property(lambda cls: False)
is_big = property(lambda cls: True)
str = property(lambda cls: 'big')
char = property(lambda cls: '>')
class bigendian(byteorder, metaclass=BigEndianType):
"""Big endian
Bytes are ordered from the most significant to the least significant.
"""
def __new__(cls):
return bigendian
class NoEndianType(ByteOrderType):
is_native = property(lambda cls: True)
is_little = property(lambda cls: True)
is_big = property(lambda cls: True)
str = property(lambda cls: 'none')
char = property(lambda cls: '|')
class noendian(byteorder, metaclass=NoEndianType):
"""Endianness does not make sense.
This endianness is used for words made of zero or one byte.
"""
def __new__(cls):
return noendian
native = littleendian if sys.byteorder == 'little' else bigendian
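# Example (illustrative): byteorder('>') is bigendian, byteorder('<') is littleendian,
# byteorder('=') resolves to this machine's native order, and byteorder('|') is noendian
# (used for one-byte types where endianness is meaningless).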
# ----------------------------------------------------------------------
# DTYPE MAKER
# ----------------------------------------------------------------------
def _new_dtype(cls, obj):
if isinstance(obj, type) and issubclass(obj, dtype):
if not issubclass(obj, cls):
raise TypeError('Cannot make {} from {} (wrong hierarchy)'
.format(cls, obj))
return obj
if obj is None:
return _new_dtype(cls, _torch.get_default_dtype())
    if isinstance(obj, str):
return _from_str(cls, obj)
if np and isinstance(obj, np.dtype):
return _from_np_dtype(cls, obj)
if np and isinstance(obj, type) and issubclass(obj, np.generic):
return _from_np_dtype(cls, np.dtype(obj))
if isinstance(obj, _torch.dtype):
return _from_torch(cls, obj)
if isinstance(obj, type) and issubclass(obj, numbers.Number):
return _from_python(cls, obj)
raise TypeError('{} cannot be interpreted as a type'.format(obj))
def _from_str(cls, obj):
if not isinstance(obj, str):
raise TypeError('Expected a string. Got {}.'.format(type(obj)))
if len(obj) == 1:
# from char
for dt in concrete_types:
if dt.char == obj:
return _new_dtype(cls, dt)
elif len(obj) in (2, 3):
# from str
if obj[0] in ('<', '>', '=', '|'):
bo = obj[0]
obj = obj[1:]
else:
bo = '='
if bo in ('=', '|'):
bo = native.char
for dt in concrete_types:
if dt.str in (bo + obj, '|' + obj):
return _new_dtype(cls, dt)
elif len(obj) > 3:
# from name
for dt in concrete_types:
if dt.name == obj.lower() and dt.is_native:
return _new_dtype(cls, dt)
raise ValueError('{} could not be interpreted as a type.'
.format(obj))
def _from_np_dtype(cls, obj):
if not np or not isinstance(obj, np.dtype):
raise TypeError('Expected numpy dtype but got {}.'.format(obj))
for dt in concrete_types:
if dt.numpy == obj:
return _new_dtype(cls, dt)
raise ValueError('{} could not be interpreted as a type.'
.format(obj))
def _from_torch(cls, obj):
if obj is _torch.bool:
return _new_dtype(cls, logical)
if obj is _torch.uint8:
return _new_dtype(cls, uint8)
if obj is _torch.int8:
return _new_dtype(cls, int8)
if obj is _torch.int16:
return _new_dtype(cls, int16)
if obj is _torch.int32:
return _new_dtype(cls, int32)
if obj is _torch.int64:
return _new_dtype(cls, int64)
if obj is _torch.float16:
return _new_dtype(cls, float16)
if obj is _torch.float32:
return _new_dtype(cls, float32)
if obj is _torch.float64:
return _new_dtype(cls, float64)
if obj is _torch.complex32:
return _new_dtype(cls, complex32)
if obj is _torch.complex64:
return _new_dtype(cls, complex64)
if obj is _torch.complex128:
return _new_dtype(cls, complex128)
if not isinstance(obj, _torch.dtype):
raise TypeError('Input object is not a torch data type')
raise TypeError('Input (quantized?) data type is not supported')
def _from_python(cls, obj):
if obj is bool:
return _new_dtype(cls, bool_)
if obj is int:
return _new_dtype(cls, int_)
if obj is float:
return _new_dtype(cls, float_)
if obj is complex:
return _new_dtype(cls, complex_)
if not isinstance(obj, numbers.Number):
raise TypeError('Input object is not a python data type')
raise TypeError('Input data type is not supported')
# ----------------------------------------------------------------------
# DATA TYPES
# ----------------------------------------------------------------------
class DataType(type):
is_floating_point: bool = property(lambda cls: False)
is_complex: bool = property(lambda cls: False)
is_signed: bool = property(lambda cls: False)
is_native: bool = property(lambda cls: cls.byteorder in (native, noendian))
is_builtin: bool = property(lambda cls: False)
is_concrete: bool = property(lambda cls: False)
byteorder: byteorder = property(lambda cls: native)
alignment: int = property(lambda cls: 0)
itemsize: int = property(lambda cls: 0)
kind: str = property(lambda cls: '')
min: int = property(lambda cls: None)
max: int = property(lambda cls: None)
eps: int = property(lambda cls: None)
numpy: (np.dtype if np else type(None)) = property(lambda cls: None)
torch: _torch.dtype = property(lambda cls: None)
python: type = property(lambda cls: None)
numpy_upcast = property(lambda cls: cls.numpy)
torch_upcast = property(lambda cls: cls.torch)
python_upcast = property(lambda cls: cls.python)
str: str = property(lambda cls: None)
name: str = property(lambda cls: None)
char: str = property(lambda cls: None)
def same_byteorder(cls, other):
return (cls.byteorder == other.byteorder or
cls.byteorder == noendian or
other.byteorder == noendian)
def __str__(self):
return self.__name__
__repr__ = __str__
def __eq__(self, other):
return self is dtype(other)
def __lt__(cls, other):
other = dtype(other)
if cls is other:
return False
if issubclass(cls, integer) and issubclass(other, integer):
return cls.min >= other.min and cls.max <= other.max
elif issubclass(cls, integer) and issubclass(other, floatingpoint):
return cls.max <= other.significand_max
elif issubclass(cls, floatingpoint) and issubclass(other, floatingpoint):
return (cls.exponent_precision <= other.exponent_precision or
cls.significand_precision <= other.significand_precision)
else:
return False
def __gt__(cls, other):
other = dtype(other)
return other < cls
def __le__(cls, other):
other = dtype(other)
return cls < other or cls == other
def __ge__(cls, other):
other = dtype(other)
return cls > other or cls == other
class dtype(metaclass=DataType):
"""Base class for all data types.
Objects generated by the constructor of this class are singletons.
Properties
----------
is_floating_point: bool True if `float*` or `complex*` type
is_complex: bool True if `complex*` type
is_signed: bool True if `int*`, `float*`, `complex*` type
is_native: bool True if `byteorder is native`
is_builtin: bool True if it is a builtin data type
is_concrete: bool True if concrete (not abstract) type
byteorder: byteorder {bigendian, littleendian, noendian}
itemsize: int Size of one element in bytes
alignment: int Size of one element in bytes, including alignment padding
kind: str Character representing the general family ('u', 'i', 'b', 'f', 'c')
    min: literal                   Smallest value that can be encoded
    max: literal                   Largest value that can be encoded
    eps: literal                   Machine epsilon (smallest value such that `1 + eps != 1` in that type)
numpy: np.dtype or NoneType The corresponding data type in numpy (`None` if no equivalent data type)
torch: torch.dtype or NoneType The corresponding data type in torch (`None` if no equivalent data type)
python: type or NoneType The corresponding data type in python (`None` if no equivalent data type)
numpy_upcast: np.dtype Smallest numpy datatype that can be used to upcast the type
torch_upcast: torch.dtype Smallest torch datatype that can be used to upcast the type
python_upcast: type Smallest python datatype that can be used to upcast the type
name: str A (pretty) name for the data type
str: str A unique string identifier for the data type
char: str A unique char identifier for the data type
Methods
-------
newbyteorder() -> dtype Same data type with opposite byte order
__eq__(other) -> bool True if exact same types (including byte order)
__lt__(other) -> bool Self can be converted to other without loss
__gt__(other) -> bool Other can be converted to self without loss
__le__(other) -> bool `__eq__ or __lt__`
__ge__(other) -> bool `__eq__ or __gt__`
same_byteorder(other) -> bool True if types have same byte order
"""
def __new__(cls, *args, **kwargs):
return _new_dtype(cls, *args, **kwargs)
def __init__(self, dtype):
"""
Parameters
----------
dtype : ni.dtype or np.dtype or torch.dtype or type or str
Data type: either a data type from a supported package
(torch, numpy or python) or a character or a string.
Each builtin type is associated to a unique character:
- '?' -> bool
- 'b', 'B' -> int8, uint8
- 'h', 'H' -> int16, uint16
- 'i', 'I' -> int32, uint32
- 'l', 'L' -> int64, uint64
- 'e', 'E' -> float16, complex32
- 'f', 'F' -> float32, complex64
- 'd', 'D' -> float64, complex128
These types can also be described by a letter (encoding the
'kind') and a number (encoding the size in bytes):
- 'b1' -> bool
- 'i1', 'i2', 'i4', 'i8' -> int8, int16, int32, int64
- 'u1', 'u2', 'u4', 'u8' -> uint8, uint16, uint32, uint64
- 'f2', 'f4', 'f8' -> float16, float32, float64
            - 'c4', 'c8', 'c16' -> complex32, complex64, complex128
These two character types can be appended with a byte-order:
- '>' -> big endian
- '<' -> little endian
- '=' -> native endianness (default)
- '|' -> no endianness (for types with itemsize < 1)
Finally, common names ('bool', 'uint8', etc) can be used.
They always map to the native byte order.
"""
# this is just used for documentation
pass
class Number(DataType):
pass
class number(dtype, metaclass=Number):
"""Base class for numbers (booleans, integers, floats)."""
pass
class Integer(Number):
is_floating_point = property(lambda cls: False)
class integer(number, metaclass=Integer):
"""Base class for integers (signed, unsigned)"""
pass
class Signed(Integer):
is_signed = property(lambda cls: True)
kind = property(lambda cls: 'i')
class signed(integer, metaclass=Signed):
"""Base class for signed integers (int*)"""
pass
class Unsigned(Integer):
is_signed = property(lambda cls: False)
kind = property(lambda cls: 'u')
class unsigned(integer, metaclass=Unsigned):
"""Base class for unsigned integers (uint*)"""
is_signed = property(lambda cls: False)
kind = property(lambda cls: 'u')
class FloatingPoint(Number):
is_floating_point = property(lambda cls: True)
basis = property(lambda cls: 2)
exponent_precision = property(lambda cls: None)
exponent_bias = property(lambda cls: None)
exponent_max = property(lambda cls: None)
exponent_min = property(lambda cls: None)
significand_precision = property(lambda cls: None)
significand_max = property(lambda cls: None)
significand_min = property(lambda cls: 0)
normalized_min = property(lambda cls: None)
class floatingpoint(number, metaclass=FloatingPoint):
"""Base class for IEEE floats (float*, complex*)
Additional Properties
---------------------
    basis: literal                 Floating point basis (2)
exponent_precision: int Size of the exponent in bits
exponent_bias: int Bias added to the exponent
exponent_max: int Largest possible exponent
exponent_min: int Smallest possible exponent
significand_precision: int Size of the significand in bits (inc. implicit bit)
significand_max: int Largest possible significand
significand_min: int Smallest possible significand
normalized_min: float Smallest non-negative normalized value
"""
pass
class Real(FloatingPoint):
kind = property(lambda cls: 'f')
class real(floatingpoint, metaclass=Real):
"""Base class for real floats (float*)"""
pass
class Complex(FloatingPoint):
is_complex = property(lambda cls: True)
kind = property(lambda cls: 'c')
class complex(floatingpoint, metaclass=Complex):
"""Base class for complex floats (float*)"""
pass
class Logical(Unsigned):
kind = property(lambda cls: 'b')
min = property(lambda cls: 0)
max = property(lambda cls: 1)
eps = property(lambda cls: 1)
is_builtin = property(lambda cls: True)
is_concrete = property(lambda cls: True)
byteorder = property(lambda cls: noendian)
alignment = property(lambda cls: 1)
itemsize = property(lambda cls: 1)
numpy = property(lambda cls: np.dtype('b1') if np else None)
torch = property(lambda cls: _torch.bool)
python = property(lambda cls: bool)
name = property(lambda cls: 'bool')
str = property(lambda cls: '|b1')
char = property(lambda cls: '?')
def newbyteorder(cls):
return cls
class logical(unsigned, metaclass=Logical):
"""Boolean data type"""
pass
bool_ = logical
# ----------------------------------------------------------------------
# CONCRETE TYPES
# ----------------------------------------------------------------------
class UInt8(Unsigned):
is_builtin = property(lambda cls: True)
is_concrete = property(lambda cls: True)
byteorder = property(lambda cls: noendian)
alignment = property(lambda cls: 1)
itemsize = property(lambda cls: 1)
min = property(lambda cls: 0)
max = property(lambda cls: 255)
eps = property(lambda cls: 1)
numpy = property(lambda cls: np.dtype('u1') if np else None)
torch = property(lambda cls: _torch.uint8)
python = property(lambda cls: None)
python_upcast = property(lambda cls: int)
name = property(lambda cls: 'uint8')
str = property(lambda cls: '|u1')
char = property(lambda cls: 'B')
def newbyteorder(cls):
return cls
class uint8(unsigned, metaclass=UInt8):
pass
class UInt16(Unsigned):
is_builtin = property(lambda cls: True)
is_concrete = property(lambda cls: True)
byteorder = property(lambda cls: native)
alignment = property(lambda cls: 2)
itemsize = property(lambda cls: 2)
min = property(lambda cls: 0)
max = property(lambda cls: 65535)
eps = property(lambda cls: 1)
numpy = property(lambda cls: np.dtype('u2') if np else None)
torch = property(lambda cls: None)
torch_upcast = property(lambda cls: _torch.int32)
python = property(lambda cls: None)
python_upcast = property(lambda cls: int)
name = property(lambda cls: 'uint16')
str = property(lambda cls: cls.byteorder.char + 'u2')
char = property(lambda cls: 'H')
def newbyteorder(cls):
return uint16l if native == bigendian else uint16b
class uint16(unsigned, metaclass=UInt16):
pass
if native is littleendian:
UInt16Little = UInt16
uint16l = uint16
class UInt16Big(UInt16):
is_builtin = property(lambda cls: False)
byteorder = property(lambda cls: bigendian)
numpy = property(lambda cls: np.dtype('>u2') if np else None)
def newbyteorder(cls):
return uint16l
class uint16b(uint16, metaclass=UInt16Big):
pass
else:
UInt16Big = UInt16
uint16b = uint16
class UInt16Little(UInt16):
is_builtin = property(lambda cls: False)
byteorder = property(lambda cls: littleendian)
numpy = property(lambda cls: np.dtype('<u2') if np else None)
def newbyteorder(cls):
return uint16b
class uint16l(uint16, metaclass=UInt16Little):
pass
class UInt32(Unsigned):
is_builtin = property(lambda cls: True)
is_concrete = property(lambda cls: True)
byteorder = property(lambda cls: native)
alignment = property(lambda cls: 4)
itemsize = property(lambda cls: 4)
min = property(lambda cls: 0)
max = property(lambda cls: 4294967295)
eps = property(lambda cls: 1)
numpy = property(lambda cls: np.dtype('u4') if np else None)
torch = property(lambda cls: None)
torch_upcast = property(lambda cls: _torch.int64)
python = property(lambda cls: None)
python_upcast = property(lambda cls: int)
name = property(lambda cls: 'uint32')
str = property(lambda cls: cls.byteorder.char + 'u4')
char = property(lambda cls: 'I')
def newbyteorder(cls):
return uint32l if native == bigendian else uint32b
class uint32(unsigned, metaclass=UInt32):
pass
if native is littleendian:
UInt32Little = UInt32
uint32l = uint32
class UInt32Big(UInt32):
is_builtin = property(lambda cls: False)
byteorder = property(lambda cls: bigendian)
numpy = property(lambda cls: np.dtype('>u4') if np else None)
def newbyteorder(cls):
return uint32l
class uint32b(uint32, metaclass=UInt32Big):
pass
else:
UInt32Big = UInt32
uint32b = uint32
class UInt32Little(UInt32):
is_builtin = property(lambda cls: False)
byteorder = property(lambda cls: littleendian)
numpy = property(lambda cls: np.dtype('<u4') if np else None)
def newbyteorder(cls):
return uint32b
class uint32l(uint32, metaclass=UInt32Little):
pass
class UInt64(Unsigned):
is_builtin = property(lambda cls: True)
is_concrete = property(lambda cls: True)
byteorder = property(lambda cls: native)
alignment = property(lambda cls: 8)
itemsize = property(lambda cls: 8)
min = property(lambda cls: 0)
max = property(lambda cls: 18446744073709551615)
eps = property(lambda cls: 1)
numpy = property(lambda cls: np.dtype('u8') if np else None)
torch = property(lambda cls: None)
torch_upcast = property(lambda cls: _torch.double)
python = property(lambda cls: None)
python_upcast = property(lambda cls: float)
name = property(lambda cls: 'uint64')
str = property(lambda cls: cls.byteorder.char + 'u8')
char = property(lambda cls: 'L')
def newbyteorder(cls):
return uint64l if native == bigendian else uint64b
class uint64(unsigned, metaclass=UInt64):
pass
if native is littleendian:
UInt64Little = UInt64
uint64l = uint64
class UInt64Big(UInt64):
is_builtin = property(lambda cls: False)
byteorder = property(lambda cls: bigendian)
numpy = property(lambda cls: np.dtype('>u8') if np else None)
def newbyteorder(cls):
return uint64l
class uint64b(uint64, metaclass=UInt64Big):
pass
else:
UInt64Big = UInt64
uint64b = uint64
class UInt64Little(UInt64):
is_builtin = property(lambda cls: False)
byteorder = property(lambda cls: littleendian)
numpy = property(lambda cls: np.dtype('<u8') if np else None)
def newbyteorder(cls):
return uint64b
class uint64l(uint64, metaclass=UInt64Little):
pass
class Int8(Signed):
is_builtin = property(lambda cls: True)
is_concrete = property(lambda cls: True)
byteorder = property(lambda cls: noendian)
alignment = property(lambda cls: 1)
itemsize = property(lambda cls: 1)
min = property(lambda cls: -128)
max = property(lambda cls: 127)
eps = property(lambda cls: 1)
numpy = property(lambda cls: np.dtype('i1') if np else None)
torch = property(lambda cls: _torch.int8)
python = property(lambda cls: None)
python_upcast = property(lambda cls: int)
name = property(lambda cls: 'int8')
str = property(lambda cls: '|i1')
char = property(lambda cls: 'b')
def newbyteorder(cls):
return cls
class int8(signed, metaclass=Int8):
pass
class Int16(Signed):
is_builtin = property(lambda cls: True)
is_concrete = property(lambda cls: True)
byteorder = property(lambda cls: native)
alignment = property(lambda cls: 2)
itemsize = property(lambda cls: 2)
min = property(lambda cls: -32768)
max = property(lambda cls: 32767)
eps = property(lambda cls: 1)
numpy = property(lambda cls: np.dtype('i2') if np else None)
torch = property(lambda cls: _torch.int16)
python = property(lambda cls: None)
python_upcast = property(lambda cls: int)
name = property(lambda cls: 'int16')
str = property(lambda cls: cls.byteorder.char + 'i2')
char = property(lambda cls: 'h')
def newbyteorder(cls):
return int16l if native == bigendian else int16b
class int16(signed, metaclass=Int16):
pass
if native is littleendian:
Int16Little = Int16
int16l = int16
class Int16Big(Int16):
is_builtin = property(lambda cls: False)
byteorder = property(lambda cls: bigendian)
numpy = property(lambda cls: np.dtype('>i2') if np else None)
torch = property(lambda cls: None)
torch_upcast = property(lambda cls: _torch.int16)
def newbyteorder(cls):
return int16l
class int16b(int16, metaclass=Int16Big):
pass
else:
Int16Big = Int16
int16b = int16
class Int16Little(Int16):
is_builtin = property(lambda cls: False)
byteorder = property(lambda cls: littleendian)
numpy = property(lambda cls: np.dtype('<i2') if np else None)
torch = property(lambda cls: None)
torch_upcast = property(lambda cls: _torch.int16)
def newbyteorder(cls):
return int16b
class int16l(int16, metaclass=Int16Little):
pass
class Int32(Signed):
is_builtin = property(lambda cls: True)
is_concrete = property(lambda cls: True)
byteorder = property(lambda cls: native)
alignment = property(lambda cls: 4)
itemsize = property(lambda cls: 4)
min = property(lambda cls: -2147483648)
max = property(lambda cls: 2147483647)
eps = property(lambda cls: 1)
numpy = property(lambda cls: np.dtype('i4') if np else None)
torch = property(lambda cls: _torch.int32)
python = property(lambda cls: None)
python_upcast = property(lambda cls: int)
name = property(lambda cls: 'int32')
str = property(lambda cls: cls.byteorder.char + 'i4')
char = property(lambda cls: 'i')
def newbyteorder(cls):
return int32l if native == bigendian else int32b
class int32(signed, metaclass=Int32):
pass
if native is littleendian:
Int32Little = Int32
int32l = int32
class Int32Big(Int32):
is_builtin = property(lambda cls: False)
byteorder = property(lambda cls: bigendian)
numpy = property(lambda cls: np.dtype('>i4') if np else None)
torch = property(lambda cls: None)
torch_upcast = property(lambda cls: _torch.int32)
def newbyteorder(cls):
return int32l
class int32b(int32, metaclass=Int32Big):
pass
else:
Int32Big = Int32
int32b = int32
class Int32Little(Int32):
is_builtin = property(lambda cls: False)
byteorder = property(lambda cls: littleendian)
numpy = property(lambda cls: np.dtype('<i4') if np else None)
torch = property(lambda cls: None)
torch_upcast = property(lambda cls: _torch.int32)
def newbyteorder(cls):
return int32b
class int32l(int32, metaclass=Int32Little):
pass
class Int64(Signed):
is_builtin = property(lambda cls: True)
is_concrete = property(lambda cls: True)
byteorder = property(lambda cls: native)
alignment = property(lambda cls: 8)
itemsize = property(lambda cls: 8)
min = property(lambda cls: -9223372036854775808)
max = property(lambda cls: 9223372036854775807)
eps = property(lambda cls: 1)
numpy = property(lambda cls: np.dtype('i8') if np else None)
torch = property(lambda cls: _torch.int64)
python = property(lambda cls: int)
name = property(lambda cls: 'int64')
str = property(lambda cls: cls.byteorder.char + 'i8')
char = property(lambda cls: 'l')
def newbyteorder(cls):
return int64l if native == bigendian else int64b
class int64(signed, metaclass=Int64):
pass
if native is littleendian:
Int64Little = Int64
int64l = int64
class Int64Big(Int64):
is_builtin = property(lambda cls: False)
byteorder = property(lambda cls: bigendian)
numpy = property(lambda cls: np.dtype('>i8') if np else None)
torch = property(lambda cls: None)
torch_upcast = property(lambda cls: _torch.int64)
python = property(lambda cls: None)
python_upcast = property(lambda cls: int)
def newbyteorder(cls):
return int64l
class int64b(int64, metaclass=Int64Big):
pass
else:
Int64Big = Int64
int64b = int64
class Int64Little(Int64):
is_builtin = property(lambda cls: False)
byteorder = property(lambda cls: littleendian)
numpy = property(lambda cls: np.dtype('<i8') if np else None)
torch = property(lambda cls: None)
torch_upcast = property(lambda cls: _torch.int64)
python = property(lambda cls: None)
python_upcast = property(lambda cls: int)
def newbyteorder(cls):
return int64b
class int64l(int64, metaclass=Int64Little):
pass
int_ = int64
class Float16(Real):
is_builtin = property(lambda cls: True)
is_concrete = property(lambda cls: True)
byteorder = property(lambda cls: native)
alignment = property(lambda cls: 2)
itemsize = property(lambda cls: 2)
exponent_precision = property(lambda cls: 5)
exponent_bias = property(lambda cls: 15)
exponent_max = property(lambda cls: 15)
exponent_min = property(lambda cls: -14)
significand_precision = property(lambda cls: 11)
significand_max = property(lambda cls: 2047)
significand_min = property(lambda cls: 0)
normalized_min = property(lambda cls: 2**-14)
min = property(lambda cls: -65504.0)
max = property(lambda cls: 65504.0)
eps = property(lambda cls: 1e-03)
numpy = property(lambda cls: np.dtype('f2') if np else None)
torch = property(lambda cls: _torch.float16)
python = property(lambda cls: None)
python_upcast = property(lambda cls: float)
name = property(lambda cls: 'float16')
str = property(lambda cls: cls.byteorder.char + 'f2')
char = property(lambda cls: 'e')
def newbyteorder(cls):
return float16l if native == bigendian else float16b
class float16(real, metaclass=Float16):
pass
if native is littleendian:
Float16Little = Float16
float16l = float16
class Float16Big(Float16):
is_builtin = property(lambda cls: False)
byteorder = property(lambda cls: bigendian)
numpy = property(lambda cls: np.dtype('>f2') if np else None)
torch = property(lambda cls: None)
torch_upcast = property(lambda cls: _torch.float16)
python = property(lambda cls: None)
def newbyteorder(cls):
return float16l
class float16b(float16, metaclass=Float16Big):
pass
else:
Float16Big = Float16
float16b = float16
class Float16Little(Float16):
is_builtin = property(lambda cls: False)
byteorder = property(lambda cls: littleendian)
numpy = property(lambda cls: np.dtype('<f2') if np else None)
torch = property(lambda cls: None)
torch_upcast = property(lambda cls: _torch.float16)
python = property(lambda cls: None)
def newbyteorder(cls):
return float16b
class float16l(float16, metaclass=Float16Little):
pass
class Float32(Real):
is_builtin = property(lambda cls: True)
is_concrete = property(lambda cls: True)
byteorder = property(lambda cls: native)
alignment = property(lambda cls: 4)
itemsize = property(lambda cls: 4)
exponent_precision = property(lambda cls: 8)
    exponent_bias = property(lambda cls: 127)
    exponent_max = property(lambda cls: 127)
    exponent_min = property(lambda cls: -126)
significand_precision = property(lambda cls: 24)
significand_max = property(lambda cls: 16777215)
significand_min = property(lambda cls: 0)
normalized_min = property(lambda cls: 2**-126)
min = property(lambda cls: -3.4028235e+38)
max = property(lambda cls: 3.4028235e+38)
eps = property(lambda cls: 1e-06)
numpy = property(lambda cls: np.dtype('f4') if np else None)
torch = property(lambda cls: _torch.float32)
python = property(lambda cls: None)
python_upcast = property(lambda cls: float)
name = property(lambda cls: 'float32')
str = property(lambda cls: cls.byteorder.char + 'f4')
char = property(lambda cls: 'f')
def newbyteorder(cls):
return float32l if native == bigendian else float32b
class float32(real, metaclass=Float32):
pass
if native is littleendian:
Float32Little = Float32
float32l = float32
class Float32Big(Float32):
is_builtin = property(lambda cls: False)
byteorder = property(lambda cls: bigendian)
numpy = property(lambda cls: np.dtype('>f4') if np else None)
torch = property(lambda cls: None)
torch_upcast = property(lambda cls: _torch.float32)
python = property(lambda cls: None)
def newbyteorder(cls):
return float32l
class float32b(float32, metaclass=Float32Big):
pass
else:
Float32Big = Float32
float32b = float32
class Float32Little(Float32):
is_builtin = property(lambda cls: False)
byteorder = property(lambda cls: littleendian)
numpy = property(lambda cls: np.dtype('<f4') if np else None)
torch = property(lambda cls: None)
torch_upcast = property(lambda cls: _torch.float32)
python = property(lambda cls: None)
def newbyteorder(cls):
return float32b
class float32l(float32, metaclass=Float32Little):
pass
class Float64(Real):
is_builtin = property(lambda cls: True)
is_concrete = property(lambda cls: True)
byteorder = property(lambda cls: native)
alignment = property(lambda cls: 8)
itemsize = property(lambda cls: 8)
exponent_precision = property(lambda cls: 11)
    exponent_bias = property(lambda cls: 1023)
    exponent_max = property(lambda cls: 1023)
    exponent_min = property(lambda cls: -1022)
significand_precision = property(lambda cls: 53)
significand_max = property(lambda cls: 9007199254740991)
significand_min = property(lambda cls: 0)
normalized_min = property(lambda cls: 2**-1022)
min = property(lambda cls: -1.7976931348623157e+308)
max = property(lambda cls: 1.7976931348623157e+308)
eps = property(lambda cls: 1e-15)
numpy = property(lambda cls: np.dtype('f8') if np else None)
torch = property(lambda cls: _torch.float64)
python = property(lambda cls: float)
name = property(lambda cls: 'float64')
str = property(lambda cls: cls.byteorder.char + 'f8')
char = property(lambda cls: 'd')
def newbyteorder(cls):
return float64l if native == bigendian else float64b
class float64(real, metaclass=Float64):
pass
if native is littleendian:
Float64Little = Float64
float64l = float64
class Float64Big(Float64):
is_builtin = property(lambda cls: False)
byteorder = property(lambda cls: bigendian)
numpy = property(lambda cls: np.dtype('>f8') if np else None)
torch = property(lambda cls: None)
torch_upcast = property(lambda cls: _torch.float64)
python = property(lambda cls: None)
python_upcast = property(lambda cls: float)
def newbyteorder(cls):
return float64l
class float64b(float64, metaclass=Float64Big):
pass
else:
Float64Big = Float64
float64b = float64
class Float64Little(Float64):
is_builtin = property(lambda cls: False)
byteorder = property(lambda cls: littleendian)
numpy = property(lambda cls: np.dtype('<f8') if np else None)
torch = property(lambda cls: None)
torch_upcast = property(lambda cls: _torch.float64)
python = property(lambda cls: None)
python_upcast = property(lambda cls: float)
def newbyteorder(cls):
return float64b
class float64l(float64, metaclass=Float64Little):
pass
float_ = float64
class Complex32(Complex):
is_builtin = property(lambda cls: True)
is_concrete = property(lambda cls: True)
byteorder = property(lambda cls: native)
alignment = property(lambda cls: 4)
itemsize = property(lambda cls: 4)
exponent_precision = property(lambda cls: 5)
exponent_bias = property(lambda cls: 15)
exponent_max = property(lambda cls: 15)
exponent_min = property(lambda cls: -14)
significand_precision = property(lambda cls: 11)
significand_max = property(lambda cls: 2047)
significand_min = property(lambda cls: 0)
normalized_min = property(lambda cls: 2**-14)
min = property(lambda cls: -65504.0)
max = property(lambda cls: 65504.0)
eps = property(lambda cls: 1e-03)
numpy = property(lambda cls: None)
numpy_upcast = property(lambda cls: np.dtype('c8') if np else None)
torch = property(lambda cls: _torch.complex32)
python = property(lambda cls: None)
python_upcast = property(lambda cls: complex)
name = property(lambda cls: 'complex32')
str = property(lambda cls: cls.byteorder.char + 'c8')
char = property(lambda cls: 'E')
def newbyteorder(cls):
return complex32l if native == bigendian else complex32b
class complex32(complex, metaclass=Complex32):
pass
if native is littleendian:
Complex32Little = Complex32
complex32l = complex32
class Complex32Big(Complex32):
is_builtin = property(lambda cls: False)
byteorder = property(lambda cls: bigendian)
numpy_upcast = property(lambda cls: np.dtype('>c8') if np else None)
torch = property(lambda cls: None)
torch_upcast = property(lambda cls: _torch.complex32)
def newbyteorder(cls):
return complex32l
class complex32b(complex32, metaclass=Complex32Big):
pass
else:
Complex32Big = Complex32
complex32b = complex32
class Complex32Little(Complex32):
is_builtin = property(lambda cls: False)
byteorder = property(lambda cls: littleendian)
numpy_upcast = property(lambda cls: np.dtype('<c8') if np else None)
torch = property(lambda cls: None)
torch_upcast = property(lambda cls: _torch.complex32)
def newbyteorder(cls):
return complex32b
class complex32l(complex32, metaclass=Complex32Little):
pass
class Complex64(Complex):
is_builtin = property(lambda cls: True)
is_concrete = property(lambda cls: True)
byteorder = property(lambda cls: native)
alignment = property(lambda cls: 8)
itemsize = property(lambda cls: 8)
    exponent_precision = property(lambda cls: 8)
    exponent_bias = property(lambda cls: 127)
    exponent_max = property(lambda cls: 127)
    exponent_min = property(lambda cls: -126)
    significand_precision = property(lambda cls: 24)
    significand_max = property(lambda cls: 16777215)
    significand_min = property(lambda cls: 0)
    normalized_min = property(lambda cls: 2**-126)
    min = property(lambda cls: -3.4028235e+38)
    max = property(lambda cls: 3.4028235e+38)
    eps = property(lambda cls: 1e-06)
numpy = property(lambda cls: np.dtype('c8') if np else None)
torch = property(lambda cls: _torch.complex64)
python = property(lambda cls: None)
python_upcast = property(lambda cls: complex)
name = property(lambda cls: 'complex64')
str = property(lambda cls: cls.byteorder.char + 'c8')
char = property(lambda cls: 'F')
def newbyteorder(cls):
return complex64l if native == bigendian else complex64b
class complex64(complex, metaclass=Complex64):
pass
if native is littleendian:
Complex64Little = Complex64
complex64l = complex64
class Complex64Big(Complex64):
is_builtin = property(lambda cls: False)
byteorder = property(lambda cls: bigendian)
numpy = property(lambda cls: np.dtype('>c8') if np else None)
torch = property(lambda cls: None)
torch_upcast = property(lambda cls: _torch.complex64)
def newbyteorder(cls):
return complex64l
class complex64b(complex64, metaclass=Complex64Big):
pass
else:
Complex64Big = Complex64
complex64b = complex64
class Complex64Little(Complex64):
is_builtin = property(lambda cls: False)
byteorder = property(lambda cls: littleendian)
numpy = property(lambda cls: np.dtype('<c8') if np else None)
torch = property(lambda cls: None)
torch_upcast = property(lambda cls: _torch.complex64)
def newbyteorder(cls):
return complex64b
class complex64l(complex64, metaclass=Complex64Little):
pass
class Complex128(Complex):
is_builtin = property(lambda cls: True)
is_concrete = property(lambda cls: True)
byteorder = property(lambda cls: native)
alignment = property(lambda cls: 8)
    itemsize = property(lambda cls: 16)
exponent_precision = property(lambda cls: 11)
    exponent_bias = property(lambda cls: 1023)
    exponent_max = property(lambda cls: 1023)
    exponent_min = property(lambda cls: -1022)
significand_precision = property(lambda cls: 53)
significand_max = property(lambda cls: 9007199254740991)
significand_min = property(lambda cls: 0)
normalized_min = property(lambda cls: 2**-1022)
min = property(lambda cls: -1.7976931348623157e+308)
max = property(lambda cls: 1.7976931348623157e+308)
eps = property(lambda cls: 1e-15)
numpy = property(lambda cls: np.dtype('c16') if np else None)
torch = property(lambda cls: _torch.complex128)
python = property(lambda cls: complex)
name = property(lambda cls: 'complex128')
str = property(lambda cls: cls.byteorder.char + 'c16')
char = property(lambda cls: 'D')
def newbyteorder(cls):
return complex128l if native == bigendian else complex128b
class complex128(complex, metaclass=Complex128):
pass
if native is littleendian:
Complex128Little = Complex128
complex128l = complex128
class Complex128Big(Complex128):
is_builtin = property(lambda cls: False)
byteorder = property(lambda cls: bigendian)
numpy = property(lambda cls: np.dtype('>c16') if np else None)
torch = property(lambda cls: None)
torch_upcast = property(lambda cls: _torch.complex128)
python = property(lambda cls: None)
python_upcast = property(lambda cls: complex)
def newbyteorder(cls):
return complex128l
class complex128b(complex128, metaclass=Complex128Big):
pass
else:
Complex128Big = Complex128
complex128b = complex128
class Complex128Little(Complex128):
is_builtin = property(lambda cls: False)
byteorder = property(lambda cls: littleendian)
numpy = property(lambda cls: np.dtype('<c16') if np else None)
torch = property(lambda cls: None)
torch_upcast = property(lambda cls: _torch.complex128)
python = property(lambda cls: None)
python_upcast = property(lambda cls: complex)
def newbyteorder(cls):
return complex128b
class complex128l(complex128, metaclass=Complex128Little):
pass
complex_ = complex128
concrete_types.extend([
logical, uint8, int8, uint16l, uint16b, int16l, int16b,
uint32l, uint32b, int32l, int32b, uint64l, uint64b, int64l, int64b,
float16l, float16b, float32l, float32b, float64l, float64b,
complex32l, complex32b, complex64l, complex64b, complex128l, complex128b,
])
def upcast(*dtypes):
"""Return a data type that upcasts both input types
Parameters
----------
*dtypes : dtype_like
Returns
-------
dtype
"""
if len(dtypes) == 0:
raise ValueError('Expected at least one type')
if len(dtypes) == 1:
return dtype(dtypes[0])
if len(dtypes) > 2:
dtype1, dtype2, *dtypes = dtypes
return upcast(upcast(dtype1, dtype2), *dtypes)
dtype1, dtype2 = dtypes
dtype1 = dtype(dtype1)
dtype2 = dtype(dtype2)
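    # Branch semantics below (as inferred from the comparisons): two types that
    # compare "less than" each other are presumably equal up to byte order, so
    # the native-byteorder one is preferred; two incomparable types fall back
    # to torch's default dtype; otherwise the larger type wins.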
if dtype1 == dtype2:
return dtype1
elif dtype1 < dtype2 and dtype2 < dtype1:
if dtype1.byteorder is native:
return dtype1
else:
return dtype2
elif not (dtype1 < dtype2) and not (dtype2 < dtype1):
return dtype(_torch.get_default_dtype())
elif dtype1 < dtype2:
return dtype2
elif dtype2 < dtype1:
return dtype1
def as_dtype(package, dt, upcast=True):
"""Convert a generic data type to another package type
Parameters
----------
package : {'torch', 'numpy', 'python'}
Target package
dt : dtype_like
Input data type
upcast : bool, default=True
If True, authorize upcasting the input type
Returns
-------
dtype_like
        Data type in the target package
"""
dt = dtype(dt)
dt0 = getattr(dt, package)
if dt0 is None:
if upcast:
dt0 = getattr(dt, package + '_upcast')
else:
            raise TypeError('Cannot convert type {} to {}.'.format(dt, package))
return dt0
def as_torch(dt, upcast=True):
"""Convert a generic data type to a torch type
Parameters
----------
dt : dtype_like
Input data type
upcast : bool, default=True
If True, authorize upcasting the input type
Returns
-------
torch.dtype
Torch data type
"""
return as_dtype('torch', dt, upcast)
def as_numpy(dt, upcast=True):
"""Convert a generic data type to a numpy type
Parameters
----------
dt : dtype_like
Input data type
upcast : bool, default=True
If True, authorize upcasting the input type
Returns
-------
np.dtype
Numpy data type
"""
return as_dtype('numpy', dt, upcast)
def as_python(dt, upcast=True):
"""Convert a generic data type to a python type
Parameters
----------
dt : dtype_like
Input data type
upcast : bool, default=True
If True, authorize upcasting the input type
Returns
-------
type
Python data type
"""
return as_dtype('python', dt, upcast)
def equivalent(dtype1, dtype2):
"""Two data types are equivalent if they are equal up-to byte order
Parameters
----------
dtype1 : dtype_like
dtype2 : dtype_like
Returns
-------
bool
"""
dtype1 = dtype(dtype1)
dtype2 = dtype(dtype2)
return dtype1 in (dtype2, dtype2.newbyteorder())
def same_kind(dtype1, dtype2):
"""Check that two data types have the same kind
Parameters
----------
dtype1 : dtype_like
dtype2 : dtype_like
Returns
-------
bool
"""
dtype1 = dtype(dtype1)
dtype2 = dtype(dtype2)
return dtype1.kind == dtype2.kind
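# Illustrative usage sketch (not part of the original module), exercising the
# helpers defined above; the expected outputs in the comments are assumptions
# based on the definitions, not verified results.
if __name__ == '__main__':
    print(upcast(float32, float64))      # expected: the float64 dtype
    print(as_torch(float32))             # expected: torch.float32
    print(as_numpy(float64))             # expected: numpy dtype('float64')
    print(as_python(float32))            # expected: float (via python_upcast)
    print(equivalent(int16l, int16b))    # expected: True (equal up to byte order)
    print(same_kind(float32, float64))   # expected: True (both are real floats)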
|
import pandas as pd
import re
import json
json_df = pd.read_json("./planet_booty_songs.json")
filter_words = ["verse 1", "verse 2", "verse 3", "verse 4", "verse 5", "chorus", "intro", "outro", "bridge", "refrain", "pre chorus", ""]
# print(json_df)
counter = 0
for index,row in json_df.iterrows():
song = row[0]
counter += 1
# print(index, song['name'], song['lyrics'])
for line in song['lyrics'].split('\n'):
        line = line.replace(u"\u2018", "'").replace(u"\u2019", "'").strip()
        line = re.sub(r'\W+', ' ', line)
        line = line.lower().strip()
if line not in filter_words:
print(line)
|
# coding=utf-8
class FakePlayer(object):
def __init__(self):
self.storage = {}
def get_setting(self, key, var_type="str"):
if var_type not in ("str", "int", "float", "bool", "list"):
raise ValueError("Unknown setting type")
if var_type == "int":
value = int(self.storage.get(key))
elif var_type == "float":
value = float(self.storage.get(key))
elif var_type == "bool":
value = bool(self.storage.get(key))
elif var_type == "list":
value = self.storage.get(key)
value = value.split("|") if value else []
else:
value = self.storage.get(key)
return value
def set_setting(self, key, value):
if isinstance(value, (list, tuple)):
value = "|".join(value)
self.storage[key] = value
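# Minimal usage sketch (not part of the original class); the setting names
# below are illustrative only.
if __name__ == "__main__":
    player = FakePlayer()
    player.set_setting("volume", "11")
    print(player.get_setting("volume", "int"))          # 11
    player.set_setting("favourites", ["rock", "jazz"])  # stored as "rock|jazz"
    print(player.get_setting("favourites", "list"))     # ['rock', 'jazz']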
|
class CliAction(object):
def __init__(self, name, description, options=None):
self.name = name
self.description = description
self.options = options
def __str__(self):
string = ' - {name}: {desc}'.format(name=self.name, desc=self.description)
if self.options is not None:
for option in self.options:
string += '\n\t' + str(option)
return string
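# Minimal usage sketch (not part of the original module); any object with a
# sensible __str__ works as an option, so plain strings stand in here.
if __name__ == "__main__":
    action = CliAction("build", "compile the project", options=["--verbose", "--jobs N"])
    print(action)
    # roughly: " - build: compile the project" followed by one tab-indented
    # line per option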
|
#!/usr/bin/env python3
import os
import sys
import gzip
import json
import time
import optparse
import importlib
from importlib import machinery
from multiprocessing import Pool
sys.path.append(os.path.dirname(os.path.abspath(__file__))+'/../../')
from fp_constants import *
fingerprinter = None
app_families_file = '../../../resources/app_families.txt'
app_families = {}
for line in open(app_families_file, 'r'):
tokens = line.strip().split(',')
for i in range(1, len(tokens)):
app_families[tokens[i]] = tokens[0]
app_families_strict_file = '../../../resources/app_families_strict.txt'
app_families_strict = {}
for line in open(app_families_strict_file, 'r'):
tokens = line.strip().split(',')
for i in range(1, len(tokens)):
app_families_strict[tokens[i]] = tokens[0]
fp_sni_blacklist = set([])
class Validation:
def __init__(self, in_file, fp_db_name, output, categories, top, blacklist, malware_ctx_file, proc_list):
if output == sys.stdout:
self.out_file_pointer = sys.stdout
else:
self.out_file_pointer = open(output, 'w')
self.categories = categories
self.top = top
self.blacklist = blacklist
self.malware_ctx = None
if malware_ctx_file != None:
self.malware_ctx = {}
for line in gzip.open(malware_ctx_file):
fp_ = json.loads(line)
self.malware_ctx[fp_['str_repr']] = fp_
self.proc_list = None
if proc_list != None:
self.proc_list = []
t_ = proc_list.split(';')
for s in t_:
if s != '':
tmp_proc_list = s.split(',')
self.categories.append(tmp_proc_list[0])
self.proc_list.append(tmp_proc_list)
# read in application categories
app_cat_file = 'application_categories.json.gz'
with gzip.open(app_cat_file,'r') as fp:
self.app_cat_data = json.loads(fp.read())
self.mt_pool = Pool(32)
self.input_file = in_file
if in_file.endswith('.csv.gz'):
self.data = self.read_file_csv(in_file)
elif in_file.endswith('.json.gz') and 'dmz' in in_file:
self.data = self.read_file_dmz_json(in_file)
elif in_file.endswith('.json.gz'):
self.data = self.read_file_json(in_file)
else:
print('error: file format not supported')
sys.exit(-1)
def validate_process_identification(self):
results = []
unknown_fp = 0
unknown_s = 0
if self.top:
results = self.mt_pool.map(get_results_top, [self.data[k] for k in self.data])
elif self.blacklist:
results = self.mt_pool.map(get_results_blacklist, [self.data[k] for k in self.data])
else:
results = self.mt_pool.map(get_results, [self.data[k] for k in self.data])
# for k in self.data:
# results.append(get_results(self.data[k]))
self.data = None
self.analyze_results(results)
def analyze_results(self, results):
r_tmp_ = self.mt_pool.map(process_result, [(sl, self.categories) for sl in results])
r_tmp_ = [x for sl in r_tmp_ for x in sl]
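        # Index layout of each aggregated row (see the tuple built at the end
        # of process_result): 0 count, 1 top-1 process, 2 top-1 process family,
        # 3 sha256, 4/5 top-2 process/family, 6/7 top-3, 8/9 top-4, 10/11 top-5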
r_ = [sum([row[i] for row in r_tmp_]) for i in range(0,len(r_tmp_[0][:-1]))]
print('FILE: %s' % self.input_file)
print('\tTotal:\t\t\t\t % 8i' % r_[0])
print('\t :\t top-1 top-2 top-3 top-4 top-5')
print('\tProcess Name Category Accuracy:\t %0.6f %0.6f %0.6f %0.6f %0.6f' % (r_[2]/r_[0], (r_[2]+r_[5])/r_[0], (r_[2]+r_[5]+r_[7])/r_[0], (r_[2]+r_[5]+r_[7]+r_[9])/r_[0], (r_[2]+r_[5]+r_[7]+r_[9]+r_[11])/r_[0]))
        print('\tProcess Name Accuracy:\t\t %0.6f %0.6f %0.6f %0.6f %0.6f' % (r_[1]/r_[0], (r_[1]+r_[4])/r_[0], (r_[1]+r_[4]+r_[6])/r_[0], (r_[1]+r_[4]+r_[6]+r_[8])/r_[0], (r_[1]+r_[4]+r_[6]+r_[8]+r_[10])/r_[0]))
# print('\tSHA-256 Accuracy:\t\t %0.6f' % (r_[3]/r_[0]))
r_c = [row[-1] for row in r_tmp_]
idx = 0
for c in self.categories:
if c == '':
continue
r_ = [sum([row[idx][i] for row in r_c]) for i in range(0,len(r_c[0][0]))]
print('\n\t%s Accuracy:\t\t %0.6f' % (c, (r_[1]/r_[0])))
print('\t%s Confusion Matrix:' % c)
print('\t\t\t Positive Negative')
print('\t\tPositive:% 9i\t% 9i' % (r_[2], r_[5]))
print('\t\tNegative:% 9i\t% 9i' % (r_[4], r_[3]))
if r_[2]+r_[5] > 0:
print('\t\tRecall: %0.6f' % (r_[2]/(r_[2]+r_[5])))
else:
print('\t\tRecall: %0.6f' % (0.0))
if r_[2]+r_[4] > 0:
print('\t\tPrecision: %0.6f' % (r_[2]/(r_[2]+r_[4])))
else:
print('\t\tPrecision: %0.6f' % (0.0))
idx += 1
def read_file_csv(self, f):
data = {}
max_lines = 30000000
cur_line = 0
start = time.time()
for line in os.popen('zcat %s' % (f)):
cur_line += 1
if cur_line > max_lines:
break
# if '(0000)' not in line:
# continue
t_ = line.strip().split(',')
src = t_[0]
proc = t_[3]
sha256 = t_[4]
type_ = t_[5]
fp_str = t_[6].replace('()','')
dst_x = t_[7].split(')')
os_ = clean_os_str(t_[8])
if os_ == None:
continue
dst_ip = dst_x[0][1:]
dst_port = int(dst_x[1][1:])
server_name = dst_x[2][1:]
src_port = int(t_[9].split(')')[1][1:])
av_hits = 0
if len(t_) > 10:
av_hits = int(t_[10])
proc = clean_proc_name(proc)
if proc in uninformative_proc_names:
continue
fp_malware_ = False
if self.malware_ctx != None:
if fp_str in self.malware_ctx:
fp_malware_ = is_fp_malware(self.malware_ctx[fp_str])
else:
continue
app_cat = None
if proc in self.app_cat_data:
app_cat = self.app_cat_data[proc]
malware = is_proc_malware({'process':proc}, fp_malware_, av_hits)
domain = server_name
sni_split = server_name.split('.')
if len(sni_split) > 1:
domain = sni_split[-2] + '.' + sni_split[-1]
if server_name in sni_whitelist or domain in domain_whitelist:
malware = False
app_cats = {}
app_cats['malware'] = malware
for c in self.categories:
if c == 'malware':
app_cats[c] = malware
else:
app_cats[c] = False
if c == app_cat:
app_cats[c] = True
if os_ == None:
continue
if src not in data:
data[src] = []
data[src].append((src,src_port,proc,sha256,type_,fp_str,dst_ip,dst_port,server_name,1,os_,app_cats, self.proc_list))
print('time to read data:\t%0.2f' % (time.time()-start))
return data
def read_file_json(self, f):
data = {}
start = time.time()
key_ = 0
data[key_] = []
for line in os.popen('zcat %s' % (f)):
fp_ = json.loads(line)
if 'str_repr' in fp_:
fp_str = fp_['str_repr']
else:
fp_str = fp_['md5']
if 'process_info' in fp_:
new_procs = []
fp_malware_ = is_fp_malware(fp_)
for p_ in fp_['process_info']:
if 'process' not in p_:
p_['process'] = p_['filename']
p_['process'] = clean_proc_name(p_['process'])
if is_proc_malware(p_, fp_malware_):
new_procs.extend(clean_malware_proc(p_))
else:
new_procs.append(p_)
fp_['process_info'] = new_procs
for p_ in fp_['process_info']:
proc = p_['process']
sha256 = p_['sha256']
if p_['process'] in uninformative_proc_names:
continue
# uncomment to classify non-top processes
# pn = proc
# pn = app_families[pn] if pn in app_families else pn
# if pn in ['Chromium','Firefox','Safari','Internet Explorer','Adobe Tools',
# 'Microsoft Office','Cisco Webex','Cisco AMP','iCloud','Box']:
# continue
app_cat = None
if proc in self.app_cat_data:
app_cat = self.app_cat_data[proc]
malware = is_proc_malware(p_, False)
app_cats = {}
app_cats['malware'] = malware
for c in self.categories:
if c == 'malware':
app_cats[c] = malware
else:
app_cats[c] = False
if c == app_cat:
app_cats[c] = True
for x_ in p_['dst_info']:
dst_x = x_['dst'].split(')')
dst_ip = dst_x[0][1:]
dst_port = int(dst_x[1][1:])
server_name = dst_x[2][1:]
data[key_].append((None,None,proc,sha256,'tls',fp_str,dst_ip,dst_port,
server_name,x_['count'],None,app_cats,self.proc_list))
if len(data[key_]) > 5000:
key_ += 1
data[key_] = []
print('time to read data:\t%0.2f' % (time.time()-start))
return data
def read_file_dmz_json(self, f):
data = {}
key_ = 0
data[key_] = []
start = time.time()
for line in os.popen('zcat %s' % (f)):
fp_ = json.loads(line)
if 'str_repr' in fp_:
fp_str = fp_['str_repr']
else:
fp_str = fp_['md5']
if fp_str in schannel_fps:
fp_str = 'schannel'
proc = 'dmz_process'
sha256 = 'dmz_process'
app_cats = {}
app_cats['malware'] = False
# if fp_str not in data:
# data[fp_str] = []
dst_info_key = 'dmz_dst_info'
if dst_info_key not in fp_:
dst_info_key = 'dst_info'
for x_ in fp_[dst_info_key]:
dst_x = x_['dst'].split(')')
dst_ip = dst_x[0][1:]
dst_port = int(dst_x[1][1:])
server_name = dst_x[2][1:]
# data[fp_str].append((None,None,proc,sha256,'tls',fp_str,dst_ip,dst_port,
# server_name,x_['count'],None,app_cats))
data[key_].append((None,None,proc,sha256,'tls',fp_str,dst_ip,dst_port,
server_name,x_['count'],None,app_cats,self.proc_list))
if len(data[key_]) > 5000:
key_ += 1
data[key_] = []
print('time to read data:\t%0.2f' % (time.time()-start))
return data
def get_results(data):
results = []
for d_ in data:
src_ip = d_[0]
src_port = d_[1]
proc = d_[2]
sha256 = d_[3]
type_ = d_[4]
str_repr = d_[5]
dst_ip = d_[6]
dst_port = d_[7]
server_name = d_[8]
cnt = d_[9]
os_ = d_[10]
app_cats = d_[11]
target_proc = d_[12]
protocol = 6
ts = 0.00
# if '()(' in str_repr:
# continue
# uncomment to get results for only approximate matches
# if str_repr in fingerprinter.parsers['tls'].fp_db and 'approx_str' not in fingerprinter.parsers['tls'].fp_db[str_repr]:
# continue
# uncomment to get results without approximate matches
fp_ = fingerprinter.get_database_entry(str_repr, type_)
if fp_ == None or 'approx_str' in fp_:
continue
flow = fingerprinter.process_csv(type_, str_repr, src_ip, dst_ip, src_port, dst_port,
protocol, ts, {'server_name': server_name})
if 'analysis' not in flow:
continue
r_ = flow['analysis']
if 'probable_processes' not in r_:
continue
pi_ = r_['probable_processes'][0]
# uncomment to ignore lower scores
# if pi_['score'] < 0.99:
# continue
app_cat = 'None'
for k in app_cats:
if app_cats[k] == True:
app_cat = k
o_ = {}
o_['count'] = cnt
o_['fp_str'] = str_repr
o_['score'] = pi_['score']
o_['ground_truth'] = {'process': proc, 'sha256': sha256, 'server_name': server_name, 'dst_ip': dst_ip}
o_['ground_truth']['categories'] = {'malware': app_cats['malware'], app_cat: True}
if target_proc != None:
for test_proc in target_proc:
o_['ground_truth']['categories'][test_proc[0]] = False
if proc in test_proc:
o_['ground_truth']['categories'][test_proc[0]] = True
o_['inferred_truth'] = {'process': pi_['process'].lower(), 'sha256': pi_['sha256'], 'probable_processes': r_['probable_processes']}
o_['inferred_truth']['categories'] = {}
if target_proc != None:
for test_proc in target_proc:
o_['inferred_truth']['categories'][test_proc[0]] = False
if pi_['process'] in test_proc:
o_['inferred_truth']['categories'][test_proc[0]] = True
o_['inferred_truth']['categories'][pi_['category']] = True
if 'malware' in pi_:
o_['inferred_truth']['categories']['malware'] = pi_['malware']
else:
o_['inferred_truth']['categories']['malware'] = False
results.append(o_)
return tuple(results)
def get_results_blacklist(data):
global fp_sni_blacklist
results = []
for d_ in data:
src_ip = d_[0]
src_port = d_[1]
proc = d_[2]
sha256 = d_[3]
type_ = d_[4]
str_repr = d_[5]
dst_ip = d_[6]
dst_port = d_[7]
server_name = d_[8]
cnt = d_[9]
os_ = d_[10]
app_cats = d_[11]
protocol = 6
ts = 0.00
o_ = {}
o_['count'] = cnt
o_['fp_str'] = str_repr
o_['score'] = 0.0
o_['ground_truth'] = {'process': proc, 'sha256': sha256, 'server_name': server_name}
o_['ground_truth']['categories'] = {'malware': app_cats['malware']}
o_['inferred_truth'] = {'process': 'n/a', 'sha256': 'n/a'}
# k = '%s,%s' % (str_repr, server_name)
k = '%s,%s' % (str_repr, dst_ip)
# k = '%s' % (dst_ip)
if k in fp_sni_blacklist:
o_['inferred_truth']['categories'] = {'malware': True}
else:
o_['inferred_truth']['categories'] = {'malware': False}
results.append(o_)
return results
def get_results_top(data):
results = []
for d_ in data:
proc = d_[2]
sha256 = d_[3]
type_ = d_[4]
str_repr = d_[5]
dst_ip = d_[6]
server_name = d_[8]
cnt = d_[9]
app_cats = d_[11]
target_proc = d_[12]
fp_ = fingerprinter.get_database_entry(str_repr, type_)
if fp_ == None:
continue
if 'process_info' not in fp_:
continue
pi_ = fp_['process_info'][0]
if pi_['process'] == 'Generic DMZ Traffic':
pi_ = fp_['process_info'][1]
pi_['malware'] = fp_['process_info'][0]['malware']
if 'application_category' not in pi_:
pi_['application_category'] = 'None'
app_cat = 'None'
for k in app_cats:
if app_cats[k] == True:
app_cat = k
o_ = {}
o_['count'] = cnt
o_['fp_str'] = str_repr
o_['score'] = 0.0
o_['ground_truth'] = {'process': proc, 'sha256': sha256, 'server_name': server_name, 'dst_ip': dst_ip}
o_['ground_truth']['categories'] = {'malware': app_cats['malware'], app_cat: True}
if target_proc != None:
for test_proc in target_proc:
o_['ground_truth']['categories'][test_proc[0]] = False
if proc in test_proc:
o_['ground_truth']['categories'][test_proc[0]] = True
o_['inferred_truth'] = {'process': pi_['process'].lower(), 'sha256': pi_['sha256s']}
o_['inferred_truth']['categories'] = {}
if target_proc != None:
for test_proc in target_proc:
o_['inferred_truth']['categories'][test_proc[0]] = False
if pi_['process'] in test_proc:
o_['inferred_truth']['categories'][test_proc[0]] = True
o_['inferred_truth']['categories'][pi_['application_category']] = True
if 'malware' in pi_:
o_['inferred_truth']['categories']['malware'] = pi_['malware']
else:
o_['inferred_truth']['categories']['malware'] = False
o_['inferred_truth']['probable_processes'] = []
for p_ in fp_['process_info'][0:5]:
o_['inferred_truth']['probable_processes'].append({'process': p_['process']})
results.append(o_)
return results
verbose_out = open('verbose_out.txt','w')
def clean_proc(p):
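    # NOTE: the early return below bypasses the normalization steps that
    # follow (lowercasing, stripping '.exe' and a trailing 'd'); remove it
    # to re-enable them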
return p
p = p.lower().replace('.exe','')
if p.endswith('d'):
return p[:-1]
return p
def process_result(x_):
global app_families
global app_families_strict
sl = x_[0]
cats = x_[1]
results = []
for r in sl:
if r == None:
continue
count = r['count']
tmp_oproc_gt = r['ground_truth']['process']
oproc_gt = app_families_strict[tmp_oproc_gt] if tmp_oproc_gt in app_families_strict else tmp_oproc_gt
gproc_gt = clean_proc(app_families[tmp_oproc_gt] if tmp_oproc_gt in app_families else tmp_oproc_gt)
proc_gt = clean_proc(oproc_gt)
sha_gt = r['ground_truth']['sha256']
tmp_oproc_nf = r['inferred_truth']['process']
oproc_nf = app_families_strict[tmp_oproc_nf] if tmp_oproc_nf in app_families_strict else tmp_oproc_nf
gproc_nf = clean_proc(app_families[tmp_oproc_nf] if tmp_oproc_nf in app_families else tmp_oproc_nf)
proc_nf = clean_proc(oproc_nf)
sha_nf = r['inferred_truth']['sha256']
proc_nf2 = None
gproc_nf2 = None
if len(r['inferred_truth']['probable_processes']) > 1:
tmp_oproc_nf2 = r['inferred_truth']['probable_processes'][1]['process']
oproc_nf2 = app_families_strict[tmp_oproc_nf2] if tmp_oproc_nf2 in app_families_strict else tmp_oproc_nf2
gproc_nf2 = clean_proc(app_families[tmp_oproc_nf2] if tmp_oproc_nf2 in app_families else tmp_oproc_nf2)
proc_nf2 = clean_proc(oproc_nf2)
proc_nf3 = None
gproc_nf3 = None
if len(r['inferred_truth']['probable_processes']) > 2:
tmp_oproc_nf3 = r['inferred_truth']['probable_processes'][2]['process']
oproc_nf3 = app_families_strict[tmp_oproc_nf3] if tmp_oproc_nf3 in app_families_strict else tmp_oproc_nf3
gproc_nf3 = clean_proc(app_families[tmp_oproc_nf3] if tmp_oproc_nf3 in app_families else tmp_oproc_nf3)
proc_nf3 = clean_proc(oproc_nf3)
proc_nf4 = None
gproc_nf4 = None
if len(r['inferred_truth']['probable_processes']) > 3:
tmp_oproc_nf4 = r['inferred_truth']['probable_processes'][3]['process']
oproc_nf4 = app_families_strict[tmp_oproc_nf4] if tmp_oproc_nf4 in app_families_strict else tmp_oproc_nf4
gproc_nf4 = clean_proc(app_families[tmp_oproc_nf4] if tmp_oproc_nf4 in app_families else tmp_oproc_nf4)
proc_nf4 = clean_proc(oproc_nf4)
proc_nf5 = None
gproc_nf5 = None
if len(r['inferred_truth']['probable_processes']) > 4:
tmp_oproc_nf5 = r['inferred_truth']['probable_processes'][4]['process']
oproc_nf5 = app_families_strict[tmp_oproc_nf5] if tmp_oproc_nf5 in app_families_strict else tmp_oproc_nf5
gproc_nf5 = clean_proc(app_families[tmp_oproc_nf5] if tmp_oproc_nf5 in app_families else tmp_oproc_nf5)
proc_nf5 = clean_proc(oproc_nf5)
r_proc = r['count'] if proc_gt == proc_nf else 0
r_gproc = r['count'] if gproc_gt == gproc_nf else 0
r_sha = r['count'] if sha_gt == sha_nf else 0
r_proc2 = 0
if r_proc == 0:
r_proc2 = r['count'] if proc_gt == proc_nf2 else 0
r_gproc2 = 0
if r_gproc == 0:
r_gproc2 = r['count'] if gproc_gt == gproc_nf2 else 0
r_proc3 = 0
if r_proc == 0 and r_proc2 == 0:
r_proc3 = r['count'] if proc_gt == proc_nf3 else 0
r_gproc3 = 0
if r_gproc == 0 and r_gproc2 == 0:
r_gproc3 = r['count'] if gproc_gt == gproc_nf3 else 0
r_proc4 = 0
if r_proc == 0 and r_proc2 == 0 and r_proc3 == 0:
r_proc4 = r['count'] if proc_gt == proc_nf4 else 0
r_gproc4 = 0
if r_gproc == 0 and r_gproc2 == 0 and r_gproc3 == 0:
r_gproc4 = r['count'] if gproc_gt == gproc_nf4 else 0
r_proc5 = 0
if r_proc == 0 and r_proc2 == 0 and r_proc3 == 0 and r_proc4 == 0:
r_proc5 = r['count'] if proc_gt == proc_nf5 else 0
r_gproc5 = 0
if r_gproc == 0 and r_gproc2 == 0 and r_gproc3 == 0 and r_gproc4 == 0:
r_gproc5 = r['count'] if gproc_gt == gproc_nf5 else 0
# if oproc_gt != oproc_nf:
# verbose_out.write('%i,%s,%s,%f\n' % (count, tmp_oproc_gt.replace(',',''), tmp_oproc_nf, r['score']))
# verbose_out.write('%i,%s,%s,%f\n' % (count, tmp_oproc_gt.replace(',',''), tmp_oproc_nf, r['score']))
# if oproc_gt != oproc_nf:
# if gproc_gt != gproc_nf:
# verbose_out.write('%i,%s,%s,%s,%s,%f,%s,%s\n' % (count, tmp_oproc_gt.replace(',',''), tmp_oproc_nf, r['ground_truth']['server_name'],
# r['ground_truth']['dst_ip'], r['score'],sha_gt,r['fp_str']))
# verbose_out.flush()
r_cats = []
for c in cats:
if c == '':
continue
c_gt = False
c_nf = False
if c in r['ground_truth']['categories']:
c_gt = r['ground_truth']['categories'][c]
if c in r['inferred_truth']['categories']:
c_nf = r['inferred_truth']['categories'][c]
r_cat_a = r['count'] if c_gt == c_nf else 0
r_cat_tp = r['count'] if c_gt == True and c_nf == True else 0
r_cat_tn = r['count'] if c_gt == False and c_nf == False else 0
r_cat_fp = r['count'] if c_gt == False and c_nf == True else 0
r_cat_fn = r['count'] if c_gt == True and c_nf == False else 0
r_cats.append([r['count'], r_cat_a, r_cat_tp, r_cat_tn, r_cat_fp, r_cat_fn])
# if c_gt == True or c_nf == True:
if c_gt != c_nf:
verbose_out.write('%i,%s,%s,%s,%s,%f,%s,%s\n' % (count, tmp_oproc_gt.replace(',',''), tmp_oproc_nf, r['ground_truth']['server_name'],
r['ground_truth']['dst_ip'], r['score'],sha_gt,r['fp_str']))
# verbose_out.write('%i,%s,%s,%f\n' % (count, tmp_oproc_gt.replace(',',''), tmp_oproc_nf, r['score']))
verbose_out.flush()
# if c_gt == False and c_nf == True:
# if c_gt == True and c_nf == False:
# if c_gt == False and c_nf == False:
# verbose_out.write('%s\n' % (sha_gt))
# verbose_out.write('%i,%s,%s,%s,%s,%f,%s,%s\n' % (count, tmp_oproc_gt.replace(',',''), tmp_oproc_nf, r['ground_truth']['server_name'],
# r['ground_truth']['dst_ip'], r['score'],sha_gt,r['fp_str']))
# verbose_out.flush()
results.append((r['count'], r_proc, r_gproc, r_sha, r_proc2, r_gproc2, r_proc3, r_gproc3, r_proc4, r_gproc4, r_proc5, r_gproc5, r_cats))
return results
def main():
global fingerprinter
global verbose_out
start = time.time()
parser = optparse.OptionParser()
parser.add_option('-i','--input',action='store',dest='input',
help='daily fingerprint file',default=None)
parser.add_option('-o','--output',action='store',dest='output',
help='output file',default=sys.stdout)
parser.add_option('-f','--fp_db',action='store',dest='fp_db',
help='location of fingerprint database',default='../../../resources/fingerprint_db.json.gz')
parser.add_option('-c','--categories',action='store',dest='categories',
help='test 1-vs-all on specific category, e.g., vpn',default='')
parser.add_option('-p','--process',action='store',dest='process',
help='test on specific processes, e.g., firefox,firefox.exe',default=None)
parser.add_option('-e','--endpoint',action='store_true',dest='endpoint',
help='enable endpoint modeling',default=False)
parser.add_option('-t','--top',action='store_true',dest='top',
help='report most prevalent process',default=False)
parser.add_option('-b','--blacklist',action='store_true',dest='blacklist',
help='use fp/sni blacklist',default=False)
parser.add_option('-m','--malware_context',action='store',dest='malware_context',
help='malware context',default=None)
options, args = parser.parse_args()
    if options.input == None:
        print('error: need to specify input')
        return 1
if options.endpoint and options.input.endswith('.json.gz'):
print('warning: endpoint modeling not available for json format')
options.endpoint = False
if options.blacklist:
for line in open('data/fp_ip_blacklist.csv','r'):
fp_sni_blacklist.add(line.strip())
importlib.machinery.SOURCE_SUFFIXES.append('')
pmercury = importlib.import_module('..pmercury','pmercury.pmercury')
fingerprinter = pmercury.Fingerprinter(options.fp_db, 'test.out', True, num_procs=5, human_readable=False,
group=False, experimental=False, endpoint=options.endpoint)
tester = Validation(options.input, options.fp_db, options.output, options.categories.split(','), options.top, options.blacklist,
options.malware_context, options.process)
tester.validate_process_identification()
verbose_out.close()
if options.endpoint:
fingerprinter.endpoint_model.write_all(fingerprinter.endpoint_file_pointer)
if __name__ == '__main__':
sys.exit(main())
|
"""elgato package."""
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/datastore_admin_v1/proto/index.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/datastore_admin_v1/proto/index.proto",
package="google.datastore.admin.v1",
syntax="proto3",
serialized_options=b"\n\035com.google.datastore.admin.v1B\nIndexProtoP\001Z>google.golang.org/genproto/googleapis/datastore/admin/v1;admin\252\002\037Google.Cloud.Datastore.Admin.V1\352\002#Google::Cloud::Datastore::Admin::V1",
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n1google/cloud/datastore_admin_v1/proto/index.proto\x12\x19google.datastore.admin.v1\x1a\x1fgoogle/api/field_behavior.proto\x1a\x1cgoogle/api/annotations.proto"\xe6\x04\n\x05Index\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x15\n\x08index_id\x18\x03 \x01(\tB\x03\xe0\x41\x03\x12\x11\n\x04kind\x18\x04 \x01(\tB\x03\xe0\x41\x02\x12\x44\n\x08\x61ncestor\x18\x05 \x01(\x0e\x32-.google.datastore.admin.v1.Index.AncestorModeB\x03\xe0\x41\x02\x12I\n\nproperties\x18\x06 \x03(\x0b\x32\x30.google.datastore.admin.v1.Index.IndexedPropertyB\x03\xe0\x41\x02\x12:\n\x05state\x18\x07 \x01(\x0e\x32&.google.datastore.admin.v1.Index.StateB\x03\xe0\x41\x03\x1ah\n\x0fIndexedProperty\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x42\n\tdirection\x18\x02 \x01(\x0e\x32*.google.datastore.admin.v1.Index.DirectionB\x03\xe0\x41\x02"J\n\x0c\x41ncestorMode\x12\x1d\n\x19\x41NCESTOR_MODE_UNSPECIFIED\x10\x00\x12\x08\n\x04NONE\x10\x01\x12\x11\n\rALL_ANCESTORS\x10\x02"E\n\tDirection\x12\x19\n\x15\x44IRECTION_UNSPECIFIED\x10\x00\x12\r\n\tASCENDING\x10\x01\x12\x0e\n\nDESCENDING\x10\x02"P\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0c\n\x08\x43REATING\x10\x01\x12\t\n\x05READY\x10\x02\x12\x0c\n\x08\x44\x45LETING\x10\x03\x12\t\n\x05\x45RROR\x10\x04\x42\xb5\x01\n\x1d\x63om.google.datastore.admin.v1B\nIndexProtoP\x01Z>google.golang.org/genproto/googleapis/datastore/admin/v1;admin\xaa\x02\x1fGoogle.Cloud.Datastore.Admin.V1\xea\x02#Google::Cloud::Datastore::Admin::V1b\x06proto3',
dependencies=[
google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
],
)
_INDEX_ANCESTORMODE = _descriptor.EnumDescriptor(
name="AncestorMode",
full_name="google.datastore.admin.v1.Index.AncestorMode",
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name="ANCESTOR_MODE_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="NONE",
index=1,
number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="ALL_ANCESTORS",
index=2,
number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
],
containing_type=None,
serialized_options=None,
serialized_start=531,
serialized_end=605,
)
_sym_db.RegisterEnumDescriptor(_INDEX_ANCESTORMODE)
_INDEX_DIRECTION = _descriptor.EnumDescriptor(
name="Direction",
full_name="google.datastore.admin.v1.Index.Direction",
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name="DIRECTION_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="ASCENDING",
index=1,
number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="DESCENDING",
index=2,
number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
],
containing_type=None,
serialized_options=None,
serialized_start=607,
serialized_end=676,
)
_sym_db.RegisterEnumDescriptor(_INDEX_DIRECTION)
_INDEX_STATE = _descriptor.EnumDescriptor(
name="State",
full_name="google.datastore.admin.v1.Index.State",
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name="STATE_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="CREATING",
index=1,
number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="READY",
index=2,
number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="DELETING",
index=3,
number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="ERROR",
index=4,
number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
],
containing_type=None,
serialized_options=None,
serialized_start=678,
serialized_end=758,
)
_sym_db.RegisterEnumDescriptor(_INDEX_STATE)
_INDEX_INDEXEDPROPERTY = _descriptor.Descriptor(
name="IndexedProperty",
full_name="google.datastore.admin.v1.Index.IndexedProperty",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.datastore.admin.v1.Index.IndexedProperty.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\340A\002",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="direction",
full_name="google.datastore.admin.v1.Index.IndexedProperty.direction",
index=1,
number=2,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\340A\002",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=425,
serialized_end=529,
)
_INDEX = _descriptor.Descriptor(
name="Index",
full_name="google.datastore.admin.v1.Index",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="project_id",
full_name="google.datastore.admin.v1.Index.project_id",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\340A\003",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="index_id",
full_name="google.datastore.admin.v1.Index.index_id",
index=1,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\340A\003",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="kind",
full_name="google.datastore.admin.v1.Index.kind",
index=2,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\340A\002",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="ancestor",
full_name="google.datastore.admin.v1.Index.ancestor",
index=3,
number=5,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\340A\002",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="properties",
full_name="google.datastore.admin.v1.Index.properties",
index=4,
number=6,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\340A\002",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="state",
full_name="google.datastore.admin.v1.Index.state",
index=5,
number=7,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\340A\003",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[_INDEX_INDEXEDPROPERTY,],
enum_types=[_INDEX_ANCESTORMODE, _INDEX_DIRECTION, _INDEX_STATE,],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=144,
serialized_end=758,
)
_INDEX_INDEXEDPROPERTY.fields_by_name["direction"].enum_type = _INDEX_DIRECTION
_INDEX_INDEXEDPROPERTY.containing_type = _INDEX
_INDEX.fields_by_name["ancestor"].enum_type = _INDEX_ANCESTORMODE
_INDEX.fields_by_name["properties"].message_type = _INDEX_INDEXEDPROPERTY
_INDEX.fields_by_name["state"].enum_type = _INDEX_STATE
_INDEX_ANCESTORMODE.containing_type = _INDEX
_INDEX_DIRECTION.containing_type = _INDEX
_INDEX_STATE.containing_type = _INDEX
DESCRIPTOR.message_types_by_name["Index"] = _INDEX
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Index = _reflection.GeneratedProtocolMessageType(
"Index",
(_message.Message,),
{
"IndexedProperty": _reflection.GeneratedProtocolMessageType(
"IndexedProperty",
(_message.Message,),
{
"DESCRIPTOR": _INDEX_INDEXEDPROPERTY,
"__module__": "google.cloud.datastore_admin_v1.proto.index_pb2",
"__doc__": """A property of an index.
Attributes:
name:
Required. The property name to index.
direction:
Required. The indexed property’s direction. Must not be
DIRECTION_UNSPECIFIED.
""",
# @@protoc_insertion_point(class_scope:google.datastore.admin.v1.Index.IndexedProperty)
},
),
"DESCRIPTOR": _INDEX,
"__module__": "google.cloud.datastore_admin_v1.proto.index_pb2",
"__doc__": """A minimal index definition.
Attributes:
project_id:
Output only. Project ID.
index_id:
Output only. The resource ID of the index.
kind:
Required. The entity kind to which this index applies.
ancestor:
Required. The index’s ancestor mode. Must not be
ANCESTOR_MODE_UNSPECIFIED.
properties:
Required. An ordered sequence of property names and their
index attributes.
state:
Output only. The state of the index.
""",
# @@protoc_insertion_point(class_scope:google.datastore.admin.v1.Index)
},
)
_sym_db.RegisterMessage(Index)
_sym_db.RegisterMessage(Index.IndexedProperty)
DESCRIPTOR._options = None
_INDEX_INDEXEDPROPERTY.fields_by_name["name"]._options = None
_INDEX_INDEXEDPROPERTY.fields_by_name["direction"]._options = None
_INDEX.fields_by_name["project_id"]._options = None
_INDEX.fields_by_name["index_id"]._options = None
_INDEX.fields_by_name["kind"]._options = None
_INDEX.fields_by_name["ancestor"]._options = None
_INDEX.fields_by_name["properties"]._options = None
_INDEX.fields_by_name["state"]._options = None
# @@protoc_insertion_point(module_scope)
|
# Copyright (c) 2016 Huawei Technologies India Pvt.Limited.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from osc_lib.tests import utils as tests_utils
from networking_sfc.osc import common
from networking_sfc.osc.sfc import port_pair
from networking_sfc.tests.unit.osc import fakes
def _get_id(client, id_or_name, resource):
return id_or_name
class TestListPortPair(fakes.TestNeutronClientOSCV2):
_port_pairs = fakes.FakePortPair.create_port_pairs(count=1)
columns = ('ID', 'Name', 'Ingress Logical Port', 'Egress Logical Port')
data = []
_port_pair = _port_pairs['port_pairs'][0]
data.append((
_port_pair['id'],
_port_pair['name'],
_port_pair['ingress'],
_port_pair['egress']))
_port_pair1 = {'port_pairs': _port_pair}
    _port_pair_id = _port_pair['id']
def setUp(self):
super(TestListPortPair, self).setUp()
self.neutronclient.list_ext = mock.Mock(
return_value=self._port_pair1
)
# Get the command object to test
self.cmd = port_pair.ListPortPair(self.app, self.namespace)
def test_port_pair_list(self):
client = self.app.client_manager.neutronclient
mock_port_pair_list = client.list_ext
parsed_args = self.check_parser(self.cmd, [], [])
columns = self.cmd.take_action(parsed_args)
data = mock_port_pair_list.assert_called_once_with(
collection='port_pairs', path='/sfc/port_pairs', retrieve_all=True)
self.assertEqual(self.columns, columns[0])
self.assertIsNone(data)
class TestCreatePortPair(fakes.TestNeutronClientOSCV2):
# The new port_pair created
_port_pair = fakes.FakePortPair.create_port_pair()
columns = (
'id',
'name',
'description',
'ingress',
'egress',
'service_function_parameter',
)
def get_data(self):
return (
self._port_pair['id'],
self._port_pair['name'],
self._port_pair['description'],
self._port_pair['ingress'],
self._port_pair['egress'],
self._port_pair['service_function_parameter'],
)
def setUp(self):
super(TestCreatePortPair, self).setUp()
mock.patch('networking_sfc.osc.common.get_id',
new=_get_id).start()
common.create_sfc_resource = mock.Mock(
return_value={'port_pairs': self._port_pair})
self.data = self.get_data()
# Get the command object to test
self.cmd = port_pair.CreatePortPair(self.app, self.namespace)
def test_create_port_pair_with_no_args(self):
arglist = []
verifylist = []
self.assertRaises(tests_utils.ParserException, self.check_parser,
self.cmd, arglist, verifylist)
class TestDeletePortPair(fakes.TestNeutronClientOSCV2):
def setUp(self):
super(TestDeletePortPair, self).setUp()
_port_pair = fakes.FakePortPair.create_port_pairs()
self._port_pair = _port_pair['port_pairs'][0]
_port_pair_id = self._port_pair['id']
common.delete_sfc_resource = mock.Mock(return_value=None)
common.find_sfc_resource = mock.Mock(return_value=_port_pair_id)
self.cmd = port_pair.DeletePortPair(self.app, self.namespace)
def test_delete_port_pair(self):
client = self.app.client_manager.neutronclient
mock_port_pair_delete = common.delete_sfc_resource
arglist = [
self._port_pair['id'],
]
verifylist = [
('port_pair', self._port_pair['id']),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
mock_port_pair_delete.assert_called_once_with(client,
'port_pair',
self._port_pair['id'])
self.assertIsNone(result)
class TestShowPortPair(fakes.TestNeutronClientOSCV2):
_pp = fakes.FakePortPair.create_port_pair()
data = (
_pp['description'],
_pp['egress'],
_pp['id'],
_pp['ingress'],
_pp['name'],
_pp['project_id'],
_pp['service_function_parameter']
)
_port_pair = {'port_pair': _pp}
_port_pair_id = _pp['id']
columns = (
'description',
'egress',
'id',
'ingress',
'name',
'project_id',
'service_function_parameter'
)
def setUp(self):
super(TestShowPortPair, self).setUp()
common.find_sfc_resource = mock.Mock(return_value=self._port_pair_id)
common.show_sfc_resource = mock.Mock(
return_value=self._port_pair
)
# Get the command object to test
self.cmd = port_pair.ShowPortPair(self.app, self.namespace)
def test_port_pair_show(self):
client = self.app.client_manager.neutronclient
mock_port_pair_show = common.show_sfc_resource
arglist = [
self._port_pair_id,
]
verifylist = [
('port_pair', self._port_pair_id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
data = self.cmd.take_action(parsed_args)
mock_port_pair_show.assert_called_once_with(client,
'port_pair',
self._port_pair_id)
self.assertEqual(self.columns, data[0])
self.assertEqual(self.data, data[1])
class TestUpdatePortPair(fakes.TestNeutronClientOSCV2):
_port_pair = fakes.FakePortPair.create_port_pair()
_port_pair_name = _port_pair['name']
_port_pair_id = _port_pair['id']
def setUp(self):
super(TestUpdatePortPair, self).setUp()
common.update_sfc_resource = mock.Mock(return_value=None)
common.find_sfc_resource = mock.Mock(return_value=self._port_pair_id)
self.cmd = port_pair.UpdatePortPair(self.app, self.namespace)
def test_update_port_pair(self):
client = self.app.client_manager.neutronclient
mock_port_pair_update = common.update_sfc_resource
arglist = [
self._port_pair_name,
'--name', 'name_updated',
'--description', 'desc_updated'
]
verifylist = [
('port_pair', self._port_pair_name),
('name', 'name_updated'),
('description', 'desc_updated'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
attrs = {
'name': 'name_updated',
'description': 'desc_updated'}
mock_port_pair_update.assert_called_once_with(client,
'port_pair', attrs,
self._port_pair_id)
self.assertIsNone(result)
|
import json
import os
HOME_DIR = os.environ.get('HAT_HOME') or "/etc/hive-attention-tokens"
CONFIG_FIELDS = [
'witness_name', 'signing_key', 'public_signing_key',
'ssl_cert', 'ssl_key', 'server_port', 'server_host',
'db_username', 'db_password'
]
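# Example of the expected config file layout (illustrative values only): one
# key=value pair per line, with values containing '[' or '{' parsed as JSON.
#
#   witness_name=alice
#   server_port=8080
#   ssl_cert=/etc/hive-attention-tokens/cert.pem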
class Config:
# TODO: split witness_config from server_config
config = {}
@classmethod
def load_config(cls, config_file):
values = {}
if not os.path.isdir(HOME_DIR):
os.mkdir(HOME_DIR)
if not os.path.exists(config_file):
new_conf = open(config_file, 'w')
new_conf.writelines(f"{field}=\n" for field in CONFIG_FIELDS)
new_conf.close()
print(
'No config file detected. A blank one has been created.\n'
'Populate it with the correct details and restart hive-attention-tokens.'
)
os._exit(1)
f = open(config_file, 'r').readlines()
for line in f:
if '=' in line:
setting = line.split('=')
_key = setting[0]
assert _key in CONFIG_FIELDS, f"invalid config key detected {_key}"
_value = setting[1].strip('\n ')
if '[' in _value or '{' in _value:
values[_key] = json.loads(_value)
else:
values[_key] = _value
cls.config = values
Config.load_config(HOME_DIR + "/config.ini")
|
from Jumpscale import j
class {{shorturl}}_model(j.baseclasses.threebot_actor):
def _init(self, **kwargs):
#get bcdb from package
self.bcdb = self.package.bcdb
self.model = self.bcdb.model_get(url="{{model.schema.url}}")
@j.baseclasses.actor_method
def new(self,schema_out=None, user_session=None,**kwargs):
"""
```in
{{fields_schema}}
```
```out
res = (O) !{{model.schema.url}}
```
"""
assert user_session.admin #for now only allow admin
return self.model.set_dynamic(kwargs)
@j.baseclasses.actor_method
def set(self, object_id=None,values=None ,schema_out=None, user_session=None):
"""
```in
object_id = 0
values = (dict)
```
```out
res = (O) !{{model.schema.url}}
```
"""
# TODO: use user_session for authentication
assert user_session.admin #for now only allow admin
obj = self.model.get(object_id)
for key, val in values.items():
setattr(obj, key, val)
obj.save()
return obj
@j.baseclasses.actor_method
def get_by_name(self, name=None,schema_out=None, user_session=None):
"""
```in
name = (S)
```
```out
res = (O) !{{model.schema.url}}
```
"""
assert user_session.admin #for now only allow admin
return self.model.get_by_name(name)
@j.baseclasses.actor_method
def get(self, object_id=None,schema_out=None, user_session=None):
"""
```in
object_id = 0
```
```out
res = (O) !{{model.schema.url}}
```
"""
assert user_session.admin #for now only allow admin
return self.model.get(object_id)
@j.baseclasses.actor_method
def find(self, query=None,schema_out=None, user_session=None):
"""
```in
query = (dict)
```
```out
res = (LO) !{{model.schema.url}}
```
"""
assert user_session.admin #for now only allow admin
return self.model.find(query)
@j.baseclasses.actor_method
def delete(self, object_id=None,schema_out=None, user_session=None):
"""
```in
object_id = 0
```
"""
assert user_session.admin #for now only allow admin
obj = self.model.get(object_id)
obj.delete()
@j.baseclasses.actor_method
def destroy(self, schema_out=None, user_session=None):
assert user_session.admin #for now only allow admin
return self.model.destroy()
@j.baseclasses.actor_method
def count(self, schema_out=None, user_session=None):
assert user_session.admin # for now only allow admin
return self.model.count()
@j.baseclasses.actor_method
def exists(self, object_id=None, schema_out=None, user_session=None):
"""
```in
object_id = 0
```
"""
assert user_session.admin # for now only allow admin
return self.model.exists(object_id)
|
from options import option_namespaces, option_subjects
from options.cache import FREQUENT_CACHE_TTL
from options.option import NAMESPACE_DB_OPTION_MARKER, Option, OptionStores
from options.types import CONF_TYPES
AUTH_AZURE_ENABLED = '{}{}{}'.format(option_namespaces.AUTH_AZURE,
NAMESPACE_DB_OPTION_MARKER,
option_subjects.ENABLED)
AUTH_AZURE_VERIFICATION_SCHEDULE = '{}{}{}'.format(option_namespaces.AUTH_AZURE,
NAMESPACE_DB_OPTION_MARKER,
option_subjects.VERIFICATION_SCHEDULE)
AUTH_AZURE_TENANT_ID = '{}{}{}'.format(option_namespaces.AUTH_AZURE,
NAMESPACE_DB_OPTION_MARKER,
option_subjects.TENANT_ID)
AUTH_AZURE_CLIENT_ID = '{}{}{}'.format(option_namespaces.AUTH_AZURE,
NAMESPACE_DB_OPTION_MARKER,
option_subjects.CLIENT_ID)
AUTH_AZURE_CLIENT_SECRET = '{}{}{}'.format(option_namespaces.AUTH_AZURE, # noqa
NAMESPACE_DB_OPTION_MARKER,
option_subjects.CLIENT_SECRET)
class AuthAzureEnabled(Option):
key = AUTH_AZURE_ENABLED
is_global = True
is_secret = False
is_optional = True
is_list = False
typing = CONF_TYPES.BOOL
store = OptionStores.DB_OPTION
default = False
options = None
cache_ttl = FREQUENT_CACHE_TTL
class AuthAzureVerificationSchedule(Option):
key = AUTH_AZURE_VERIFICATION_SCHEDULE
is_global = True
is_secret = False
is_optional = True
is_list = False
typing = CONF_TYPES.INT
store = OptionStores.DB_OPTION
default = 0
options = None
cache_ttl = FREQUENT_CACHE_TTL
class AuthAzureTenantId(Option):
key = AUTH_AZURE_TENANT_ID
is_global = True
is_secret = True
is_optional = True
is_list = False
typing = CONF_TYPES.STR
store = OptionStores.DB_OPTION
default = None
options = None
cache_ttl = FREQUENT_CACHE_TTL
class AuthAzureClientId(Option):
key = AUTH_AZURE_CLIENT_ID
is_global = True
is_secret = True
is_optional = True
is_list = False
typing = CONF_TYPES.STR
store = OptionStores.DB_OPTION
default = None
options = None
cache_ttl = FREQUENT_CACHE_TTL
class AuthAzureClientSecret(Option):
key = AUTH_AZURE_CLIENT_SECRET
is_global = True
is_secret = True
is_optional = True
is_list = False
typing = CONF_TYPES.STR
store = OptionStores.DB_OPTION
default = None
options = None
cache_ttl = FREQUENT_CACHE_TTL
|
from django.conf import settings
from djwailer.core.models import LivewhaleEvents, LivewhaleNews
import sys, datetime
TAGS = {
498:['News & Notices',[]],
499:['Lectures & Presentations',[]],
500:['Arts & Performances',[]],
477:['Kudos',[]],
501:['Faculty News',[]],
502:['Student News',[]],
503:['Student Activities',[]],
504:['Technology',[]],
544:['Top Bridge Story',[]]
}
days = 6
now = datetime.date.today()
past = now - datetime.timedelta(days=days)
#news = LivewhaleNews.objects.using('livewhale').filter(gid=settings.BRIDGE_GROUP).filter(status=1).filter(date_dt__lte=now).exclude(is_archived="1").exclude(date_dt__lte=past)
news = LivewhaleNews.objects.using('livewhale').filter(gid=settings.BRIDGE_GROUP).filter(status=1).filter(date_dt__lte=now).filter(is_archived__isnull=True).exclude(date_dt__lte=past)
for n in news:
tid = n.tag(jid=True)
print "tid = %s " % tid
if n.is_archived:
print "archived? %s" % n.is_archived
if tid:
TAGS[tid][1].append(n)
news = []
for t in TAGS:
news.append(TAGS[t])
print news
#print TAGS[498][0]
#print TAGS[498][1]
|
# -*- coding: utf-8 -*-
import unittest
from thaianalysisrule import parser, word_seg_parser
class TestParserSent(unittest.TestCase):
def test_parser(self):
self.assertIsNotNone(parser(["ผม","เดิน"]))
    def test_word_seg_parser(self):
self.assertIsNotNone(word_seg_parser("ผมเดิน"))
|
#!python
def linear_search(array, item):
"""return the first index of item in array or None if item is not found"""
# implement linear_search_iterative and linear_search_recursive below, then
# change this to call your implementation to verify it passes all tests
return linear_search_iterative(array, item)
# return linear_search_recursive(array, item)
def linear_search_iterative(array, item):
# Time Complexity!
# Best: O(1) - First element
# Average: O(n) - Have to loop through every element in the array
# Worst: O(n) - Have to loop through every element in the array
# loop over all array values until item is found
for index, value in enumerate(array):
if item == value:
return index # found
return None # not found
def linear_search_recursive(array, item, index=0):
# Time Complexity!
# Best: O(1) - First element
# Average: O(n) - Have to loop through every element in the array
# Worst: O(n) - Have to loop through every element in the array
    if index >= len(array):
        return None  # reached the end of the array without finding the item
if item == array[index]:
return index
else:
nxt = index + 1
return linear_search_recursive(array, item, nxt)
def binary_search(array, item):
"""return the index of item in sorted array or None if item is not found"""
# implement binary_search_iterative and binary_search_recursive below, then
# change this to call your implementation to verify it passes all tests
# return binary_search_iterative(array, item)
return binary_search_recursive(array, item)
def binary_search_iterative(arr, item):
# Time Complexity!
# Best: O(1) - Middle element is the item
# Average: O(log(n)) - Because we halve the working array every time
# Worst: O(log(n)) - Even if it is the last possible iteration, it will still be log(n) time
    # Iterative binary search: returns the index of `item` in the sorted
    # array `arr` if present, else returns None.
l = 0
h = len(arr) -1
while l <= h:
mid_pos = (l + h) // 2
mid = arr[mid_pos]
        # print('low[{}] mid[{}] high[{}] target[{}] looking at[{}]'.format(l, mid_pos, h, item, mid))
# Check if item is present at mid
if mid == item:
return mid_pos
# If item is greater, ignore left half
elif mid < item:
l = mid_pos + 1
# If item is smaller, ignore right half
else:
h = mid_pos - 1
# If we reach here, then the element was not present
return None
def binary_search_recursive(array, item, left=None, right=None):
# Time Complexity!
# Best: O(1) - Middle element is the item
# Average: O(log(n)) - Because we halve the working array every time
# Worst: O(log(n)) - Even if it is the last possible iteration, it will still be log(n) time
if left is None and right is None:
left = 0
right = len(array) - 1
if left > right:
return None
mid_pos = (right + left) // 2
if array[mid_pos] == item:
return mid_pos
if array[mid_pos] < item:
return binary_search_recursive(array, item, mid_pos + 1, right)
return binary_search_recursive(array, item, left, mid_pos - 1)
if __name__ == "__main__":
"""Read command-line arguments and convert given digits between bases."""
import sys
args = sys.argv[1:] # Ignore script file name
# arr = [5,3,4,2,6,1]
# x = 4
arr = ['Alex', 'Brian', 'Julia', 'Kojin', 'Nabil', 'Nick', 'Winnie']
# x = 'Julia'
# x = 'Nabil'
x = 'chris'
# print('index: ', linear_search(arr, 2))
result = binary_search(arr, x)
    if result is not None:
        print("Element {} is present at index {}".format(x, result))
    else:
        print("Element {} is not present in array".format(x))
|
import numpy as np
import matplotlib.pyplot as plt
from typing import List, Tuple, Dict, Any
from sklearn.model_selection import train_test_split
from nndiy import Sequential
from nndiy.layer import Linear
from nndiy.utils import one_hot
YELLOW = "\033[93m"
GREEN = "\033[92m"
RED = "\033[91m"
ENDC = "\033[0m"
class DataGeneration:
def __init__(self, nb_points, eps):
self.x, self.y = None, None
self.nb_points = nb_points
self.eps = eps
def display_data(self):
if self.x is None or self.y is None:
raise ValueError("Data is not generated. Nothing to display")
if self.x.shape[1] > 1:
plt.scatter(self.x[:, 0], self.x[:, 1], marker='.', c=self.y, cmap="brg")
plt.show()
else:
plt.scatter(self.x, self.y, marker='.')
plt.show()
def get_data(self):
return self.x, self.y
class ContinuousGen(DataGeneration):
def __init__(self, nb_points=1000, eps=0.1, sigma=0.1):
super().__init__(nb_points=nb_points, eps=eps)
self.sigma = sigma
def make_sinus(self, freq=2, ampli=1, affine=0.9):
self.x = np.linspace(0, np.pi, self.nb_points).reshape(-1, 1) * freq
self.y = np.sin(self.x + affine) * ampli
self._mix_data()
def make_regression(self, slope=1, affine=0):
self.x = np.linspace(-2, 2, self.nb_points).reshape(-1, 1)
self.y = self.x * slope + affine
self._mix_data()
def _mix_data(self):
self.y += np.random.normal(0, self.sigma, self.x.shape)
idx = np.random.permutation((range(self.y.size)))
self.x = self.x[idx,:]
self.y = self.y[idx,:]
class MultiClassGen(DataGeneration):
def __init__(self, nb_classes, nb_points=1000, eps=0.1):
super().__init__(nb_points=nb_points, eps=eps)
self.nb_classes = nb_classes
def make_vertical(self):
class_size = self.nb_points // self.nb_classes
self.x = np.zeros((class_size * self.nb_classes, 2))
self.y = np.zeros(class_size * self.nb_classes, dtype=np.uint8)
for cl in range(self.nb_classes):
ix = range(class_size * cl, class_size * (cl+1))
self.x[ix] = np.c_[np.random.randn(class_size)/10 + cl/3, np.random.randn(class_size)/10 + 0.5]
self.y[ix] = cl
def make_spiral(self):
class_size = self.nb_points // self.nb_classes
self.x = np.zeros((class_size * self.nb_classes, 2))
self.y = np.zeros(class_size * self.nb_classes, dtype=np.uint8)
for cl in range(self.nb_classes):
ix = range(class_size * cl, class_size * (cl+1))
r = np.linspace(0, 1, class_size)
t = np.linspace(cl * 4, (cl+1) * 4, class_size) + np.random.randn(class_size)*0.2
self.x[ix] = np.c_[r * np.sin(t*2.5), r * np.cos(t*2.5)]
self.y[ix] = cl
class TwoClassGen(MultiClassGen):
def __init__(self, nb_points=1000, eps=0.1):
super().__init__(nb_classes=2, nb_points=nb_points, eps=eps)
def make_2_gaussians(self, center_x=1, sigma=0.1):
x_one = np.random.multivariate_normal(
[center_x, center_x],
np.diag([sigma, sigma]),
self.nb_points // 2)
x_zero = np.random.multivariate_normal(
[-center_x, -center_x],
np.diag([sigma, sigma]),
self.nb_points // 2)
self.x = np.vstack((x_one, x_zero))
self.y = np.hstack(
(np.ones(self.nb_points // 2, dtype=np.uint8),
np.zeros(self.nb_points // 2, dtype=np.uint8)))
self._mix_data()
def make_4_gaussians(self, center_x=1, sigma=0.1):
x_one = np.vstack(
(np.random.multivariate_normal(
[center_x, center_x],
np.diag([sigma,sigma]),
self.nb_points // 4),
np.random.multivariate_normal(
[-center_x, -center_x],
np.diag([sigma, sigma]),
self.nb_points // 4)))
x_zero = np.vstack(
(np.random.multivariate_normal(
[-center_x, center_x],
np.diag([sigma, sigma]),
self.nb_points // 4),
np.random.multivariate_normal(
[center_x, -center_x],
np.diag([sigma, sigma]),
self.nb_points // 4)))
self.x = np.vstack((x_one,x_zero))
self.y = np.hstack(
(np.ones(self.nb_points // 2, dtype=np.uint8),
np.zeros(self.nb_points // 2, dtype=np.uint8)))
self._mix_data()
def make_checker_board(self):
        self.x = np.random.uniform(-4, 4, 2 * self.nb_points).reshape((self.nb_points, 2))
y = np.ceil(self.x[:,0]) + np.ceil(self.x[:,1])
self.y = np.array(y % 2, dtype=np.uint8)
self._mix_data()
def _mix_data(self):
self.x[:,0] += np.random.normal(0, self.eps, self.nb_points)
self.x[:,1] += np.random.normal(0, self.eps, self.nb_points)
idx = np.random.permutation((range(self.y.size)))
self.x = self.x[idx, :]
self.y = self.y[idx].reshape(-1, 1)
def binary_classif_score(
Y_hat: np.ndarray,
Y: np.ndarray
):
predictions = np.argmax(Y_hat, axis=1).reshape(-1, 1)
return np.sum(predictions == Y) / Y.shape[0]
def multi_classif_score(
Y_hat: np.ndarray,
Y: np.ndarray
):
oh = not (len(Y.shape) == 1 or Y.shape[1] == 1) # labels are one-hot encoded?
Y = np.argmax(Y, axis=1) if oh else Y.reshape(-1)
predictions = np.argmax(Y_hat, axis=1)
return np.sum(predictions == Y) / Y.shape[0]
def mse_score(
Y_hat: np.ndarray,
Y: np.ndarray
):
return np.mean((Y_hat - Y) ** 2)
def run_test(
test_name: str, # Name of test for displaying purpose
X: np.ndarray, Y: np.ndarray, # Data
layers: List[Tuple[Linear, str]], # NN's layers in a list of tuple (Linear, activation_function_name)
model_kwargs: Dict[str, Any], # Keyword arguments to pass into NN's constructor
compile_kwargs: Dict[str, Any], # Keyword arguments to pass into NN's compile function
fit_kwargs: Dict[str, Any], # Keyword arguments to pass into NN's fit function
train_valid_test=(0.6,0.2,0.2), # Size of train, validation and test set, must sum to 1
target_score=0.85, # Desired NN's score for pass/fail assertion. Can be set to None
scoring_func=mse_score, # Function to calculate prediction's score
scoring_method="lt" # Comparator to NN's target score to decide whether test passes or fails, "lt" or "gt"
):
print(f"Testing {YELLOW}{test_name}{ENDC}:")
r_train, r_valid, r_test = train_valid_test
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=r_test)
X_train, X_valid, Y_train, Y_valid = train_test_split(X_train, Y_train, test_size=(r_valid/(r_valid+r_train)))
model = Sequential() if model_kwargs is None else Sequential(**model_kwargs)
for l, a in layers:
model.add(layer=l, activation=a)
model.compile(**compile_kwargs)
model.fit(X_train, Y_train, X_valid, Y_valid, **fit_kwargs)
score = scoring_func(model.predict(X_test), Y_test)
print(" Score: %.4f " % score, end='')
# model.plot_stats()
if target_score is None \
or (scoring_method == "lt" and score <= target_score) \
or (scoring_method == "gt" and score >= target_score):
print(f"{GREEN}OK{ENDC}")
return True
print(f"{RED}KO{ENDC}")
return False
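# Minimal usage sketch for run_test (hypothetical toy data; mirrors the real calls below):
#   X, Y = gen.x, one_hot(gen.y, 2)
#   run_test(
#       test_name="toy 2-class problem",
#       X=X, Y=Y,
#       layers=[(Linear(2, 4), "tanh"), (Linear(4, 2), "sigmoid")],
#       model_kwargs=None,
#       compile_kwargs=dict(loss="binary_crossentropy", optimizer="adam",
#                           learning_rate=1e-3, metric="accuracy"),
#       fit_kwargs=dict(n_epochs=50, verbose=False),
#       target_score=0.85, scoring_func=binary_classif_score, scoring_method="gt")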
if __name__ == '__main__':
np.random.seed(42)
#############################################################################
print("===== SIMPLE CLASSIFICATION PROBLEM WITH 2 CLASSES =====")
gen2C = TwoClassGen()
gen2C.make_2_gaussians(sigma=0.5)
# gen2C.display_data()
test_params = {
"2 Gaussians, BCE loss, GD": ("binary_crossentropy", "gd"),
"2 Gaussians, BCE loss, SGD": ("binary_crossentropy", "sgd"),
"2 Gaussians, BCE loss, MGD": ("binary_crossentropy", "mgd"),
"2 Gaussians, BCE loss, ADAM": ("binary_crossentropy", "adam"),
"2 Gaussians, Sparse BCE loss, GD": ("sparse_binary_crossentropy", "gd"),
"2 Gaussians, Sparse BCE loss, SGD": ("sparse_binary_crossentropy", "sgd"),
"2 Gaussians, Sparse BCE loss, MGD": ("sparse_binary_crossentropy", "mgd"),
"2 Gaussians, Sparse BCE loss, ADAM": ("sparse_binary_crossentropy", "adam"),
}
for name in test_params:
loss, optim = test_params[name]
if "sparse" not in loss:
Y = one_hot(gen2C.y, 2)
else:
Y = gen2C.y
run_test(
test_name=name,
X=gen2C.x, Y=Y,
layers=[
(Linear(2, 4), "tanh"),
(Linear(4, 2), "sigmoid")
],
model_kwargs=None,
compile_kwargs=dict(
loss=loss,
optimizer=optim,
learning_rate=1e-4,
metric="accuracy"
),
fit_kwargs=dict(
n_epochs=50,
verbose=False
),
target_score=0.85,
scoring_func=binary_classif_score,
scoring_method="gt"
)
gen2C.make_4_gaussians(sigma=0.2)
# gen2C.display_data()
test_params = {
"4 Gaussians, BCE loss, GD optim": ("binary_crossentropy", "gd"),
"4 Gaussians, BCE loss, SGD optim": ("binary_crossentropy", "sgd"),
"4 Gaussians, BCE loss, MGD optim": ("binary_crossentropy", "mgd"),
"4 Gaussians, BCE loss, ADAM optim": ("binary_crossentropy", "adam"),
"4 Gaussians, Sparse BCE loss, GD optim": ("sparse_binary_crossentropy", "gd"),
"4 Gaussians, Sparse BCE loss, SGD optim": ("sparse_binary_crossentropy", "sgd"),
"4 Gaussians, Sparse BCE loss, MGD optim": ("sparse_binary_crossentropy", "mgd"),
"4 Gaussians, Sparse BCE loss, ADAM optim": ("sparse_binary_crossentropy", "adam")
}
for name in test_params:
loss, optim = test_params[name]
if "sparse" not in loss:
Y = one_hot(gen2C.y, 2)
else:
Y = gen2C.y
run_test(
test_name=name,
X=gen2C.x, Y=Y,
layers=[
(Linear(2, 4), "tanh"),
(Linear(4, 2), "sigmoid")
],
model_kwargs=None,
compile_kwargs=dict(
loss=loss,
optimizer=optim,
learning_rate=1e-2,
                n_batch=20,
metric="accuracy"
),
fit_kwargs=dict(
n_epochs=50,
verbose=False
),
target_score=0.85,
scoring_func=binary_classif_score,
scoring_method="gt"
)
del gen2C
#############################################################################
print(end='\n')
nb_class = 4
print(f"===== CLASSIFICATION WITH {nb_class} CLASSES =====")
gen4C = MultiClassGen(nb_class)
gen4C.make_vertical()
# gen4C.display_data()
test_params = {
"Vertical data, CCE, GD optim": ("categorical_crossentropy", "gd"),
"Vertical data, CCE, SGD optim": ("categorical_crossentropy", "sgd"),
"Vertical data, CCpE, MGD optim": ("categorical_crossentropy", "mgd"),
"Vertical data, CCE, ADAM optim": ("categorical_crossentropy", "adam"),
"Vertical data, Sparse CCE, GD optim": ("sparse_categorical_crossentropy", "gd"),
"Vertical data, Sparse CCE, SGD optim": ("sparse_categorical_crossentropy", "sgd"),
"Vertical data, Sparse CCE, MGD optim": ("sparse_categorical_crossentropy", "mgd"),
"Vertical data, Sparse CCE, ADAM optim": ("sparse_categorical_crossentropy", "adam")
}
for name in test_params:
loss, optim = test_params[name]
if "sparse" not in loss:
Y = one_hot(gen4C.y, 4)
else:
Y = gen4C.y
run_test(
test_name=name,
X=gen4C.x, Y=Y,
layers=[
(Linear(2, nb_class * 4), "tanh"),
(Linear(nb_class * 4, nb_class), "sigmoid")
],
model_kwargs=None,
compile_kwargs=dict(
loss=loss,
optimizer=optim,
learning_rate=0.01,
metric="accuracy"
),
fit_kwargs=dict(
n_epochs=150,
verbose=False
),
target_score=0.85,
scoring_func=multi_classif_score,
scoring_method="gt"
)
del gen4C
#############################################################################
print(end='\n')
nb_class = 4
print("===== REGRESSION PROBLEM =====")
genCont = ContinuousGen()
genCont.make_regression()
# genCont.display_data()
params_optim = [
"gd",
"sgd",
"mgd",
"adam",
]
params_loss = [
"mse",
"mae",
"rmse",
]
for optim in params_optim:
for loss in params_loss:
name = f"Optimizer {optim}\tLoss function {loss}"
run_test(
test_name=name,
X=genCont.x, Y=genCont.y,
layers=[
(Linear(1, 4), "relu"),
(Linear(4, 1), "identity")
],
model_kwargs=None,
compile_kwargs=dict(
loss=loss,
optimizer=optim,
#learning_rate=8e-4
learning_rate=1e-4 if optim != "mgd" else 1e-5,
decay=(1e-4 * 5)
),
fit_kwargs=dict(
n_epochs=150,
verbose=False
),
target_score=0.1,
scoring_func=mse_score,
scoring_method="lt"
)
del genCont
|
from tests.system.action.base import BaseActionTestCase
class ChatMessageUpdate(BaseActionTestCase):
def test_update_correct(self) -> None:
self.set_models(
{
"meeting/1": {"is_active_in_organization_id": 1},
"chat_message/2": {
"user_id": 1,
"content": "blablabla",
"meeting_id": 1,
},
}
)
response = self.request("chat_message.update", {"id": 2, "content": "test"})
self.assert_status_code(response, 200)
self.assert_model_exists("chat_message/2", {"content": "test"})
def test_update_no_permissions(self) -> None:
self.set_models(
{
"meeting/1": {"is_active_in_organization_id": 1},
"user/2": {},
"chat_message/2": {
"user_id": 2,
"content": "blablabla",
"meeting_id": 1,
},
}
)
response = self.request("chat_message.update", {"id": 2, "content": "test"})
self.assert_status_code(response, 403)
assert (
"You must be creator of a chat message to edit it."
in response.json["message"]
)
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_router_bgp
short_description: Configure BGP in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify router feature and bgp category.
      Examples include all parameters, and values need to be adjusted to data sources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
router_bgp:
description:
- Configure BGP.
default: null
type: dict
suboptions:
admin_distance:
description:
- Administrative distance modifications.
type: list
suboptions:
distance:
description:
- Administrative distance to apply (1 - 255).
type: int
id:
description:
- ID.
required: true
type: int
neighbour_prefix:
description:
- Neighbor address prefix.
type: str
route_list:
description:
- Access list of routes to apply new distance to. Source router.access-list.name.
type: str
aggregate_address:
description:
- BGP aggregate address table.
type: list
suboptions:
as_set:
description:
- Enable/disable generate AS set path information.
type: str
choices:
- enable
- disable
id:
description:
- ID.
required: true
type: int
prefix:
description:
- Aggregate prefix.
type: str
summary_only:
description:
- Enable/disable filter more specific routes from updates.
type: str
choices:
- enable
- disable
aggregate_address6:
description:
- BGP IPv6 aggregate address table.
type: list
suboptions:
as_set:
description:
- Enable/disable generate AS set path information.
type: str
choices:
- enable
- disable
id:
description:
- ID.
required: true
type: int
prefix6:
description:
- Aggregate IPv6 prefix.
type: str
summary_only:
description:
- Enable/disable filter more specific routes from updates.
type: str
choices:
- enable
- disable
always_compare_med:
description:
- Enable/disable always compare MED.
type: str
choices:
- enable
- disable
as:
description:
- Router AS number, valid from 1 to 4294967295, 0 to disable BGP.
type: int
bestpath_as_path_ignore:
description:
- Enable/disable ignore AS path.
type: str
choices:
- enable
- disable
bestpath_cmp_confed_aspath:
description:
- Enable/disable compare federation AS path length.
type: str
choices:
- enable
- disable
bestpath_cmp_routerid:
description:
- Enable/disable compare router ID for identical EBGP paths.
type: str
choices:
- enable
- disable
bestpath_med_confed:
description:
- Enable/disable compare MED among confederation paths.
type: str
choices:
- enable
- disable
bestpath_med_missing_as_worst:
description:
- Enable/disable treat missing MED as least preferred.
type: str
choices:
- enable
- disable
client_to_client_reflection:
description:
- Enable/disable client-to-client route reflection.
type: str
choices:
- enable
- disable
cluster_id:
description:
- Route reflector cluster ID.
type: str
confederation_identifier:
description:
- Confederation identifier.
type: int
confederation_peers:
description:
- Confederation peers.
type: list
suboptions:
peer:
description:
- Peer ID.
required: true
type: str
dampening:
description:
- Enable/disable route-flap dampening.
type: str
choices:
- enable
- disable
dampening_max_suppress_time:
description:
- Maximum minutes a route can be suppressed.
type: int
dampening_reachability_half_life:
description:
- Reachability half-life time for penalty (min).
type: int
dampening_reuse:
description:
- Threshold to reuse routes.
type: int
dampening_route_map:
description:
- Criteria for dampening. Source router.route-map.name.
type: str
dampening_suppress:
description:
- Threshold to suppress routes.
type: int
dampening_unreachability_half_life:
description:
- Unreachability half-life time for penalty (min).
type: int
default_local_preference:
description:
- Default local preference.
type: int
deterministic_med:
description:
- Enable/disable enforce deterministic comparison of MED.
type: str
choices:
- enable
- disable
distance_external:
description:
- Distance for routes external to the AS.
type: int
distance_internal:
description:
- Distance for routes internal to the AS.
type: int
distance_local:
description:
- Distance for routes local to the AS.
type: int
ebgp_multipath:
description:
- Enable/disable EBGP multi-path.
type: str
choices:
- enable
- disable
enforce_first_as:
description:
- Enable/disable enforce first AS for EBGP routes.
type: str
choices:
- enable
- disable
fast_external_failover:
description:
- Enable/disable reset peer BGP session if link goes down.
type: str
choices:
- enable
- disable
graceful_end_on_timer:
description:
- Enable/disable to exit graceful restart on timer only.
type: str
choices:
- enable
- disable
graceful_restart:
description:
- Enable/disable BGP graceful restart capabilities.
type: str
choices:
- enable
- disable
graceful_restart_time:
description:
- Time needed for neighbors to restart (sec).
type: int
graceful_stalepath_time:
description:
- Time to hold stale paths of restarting neighbor (sec).
type: int
graceful_update_delay:
description:
- Route advertisement/selection delay after restart (sec).
type: int
holdtime_timer:
description:
- Number of seconds to mark peer as dead.
type: int
ibgp_multipath:
description:
- Enable/disable IBGP multi-path.
type: str
choices:
- enable
- disable
ignore_optional_capability:
description:
- Don't send unknown optional capability notification message
type: str
choices:
- enable
- disable
keepalive_timer:
description:
- Frequency to send keep alive requests.
type: int
log_neighbour_changes:
description:
- Enable logging of BGP neighbour's changes
type: str
choices:
- enable
- disable
neighbor:
description:
- BGP neighbor table.
type: list
suboptions:
activate:
description:
- Enable/disable address family IPv4 for this neighbor.
type: str
choices:
- enable
- disable
activate6:
description:
- Enable/disable address family IPv6 for this neighbor.
type: str
choices:
- enable
- disable
advertisement_interval:
description:
- Minimum interval (sec) between sending updates.
type: int
allowas_in:
description:
- IPv4 The maximum number of occurrence of my AS number allowed.
type: int
allowas_in_enable:
description:
- Enable/disable IPv4 Enable to allow my AS in AS path.
type: str
choices:
- enable
- disable
allowas_in_enable6:
description:
- Enable/disable IPv6 Enable to allow my AS in AS path.
type: str
choices:
- enable
- disable
allowas_in6:
description:
- IPv6 The maximum number of occurrence of my AS number allowed.
type: int
as_override:
description:
- Enable/disable replace peer AS with own AS for IPv4.
type: str
choices:
- enable
- disable
as_override6:
description:
- Enable/disable replace peer AS with own AS for IPv6.
type: str
choices:
- enable
- disable
attribute_unchanged:
description:
- IPv4 List of attributes that should be unchanged.
type: str
choices:
- as-path
- med
- next-hop
attribute_unchanged6:
description:
- IPv6 List of attributes that should be unchanged.
type: str
choices:
- as-path
- med
- next-hop
bfd:
description:
- Enable/disable BFD for this neighbor.
type: str
choices:
- enable
- disable
capability_default_originate:
description:
- Enable/disable advertise default IPv4 route to this neighbor.
type: str
choices:
- enable
- disable
capability_default_originate6:
description:
- Enable/disable advertise default IPv6 route to this neighbor.
type: str
choices:
- enable
- disable
capability_dynamic:
description:
- Enable/disable advertise dynamic capability to this neighbor.
type: str
choices:
- enable
- disable
capability_graceful_restart:
description:
- Enable/disable advertise IPv4 graceful restart capability to this neighbor.
type: str
choices:
- enable
- disable
capability_graceful_restart6:
description:
- Enable/disable advertise IPv6 graceful restart capability to this neighbor.
type: str
choices:
- enable
- disable
capability_orf:
description:
- Accept/Send IPv4 ORF lists to/from this neighbor.
type: str
choices:
- none
- receive
- send
- both
capability_orf6:
description:
- Accept/Send IPv6 ORF lists to/from this neighbor.
type: str
choices:
- none
- receive
- send
- both
capability_route_refresh:
description:
- Enable/disable advertise route refresh capability to this neighbor.
type: str
choices:
- enable
- disable
conditional_advertise:
description:
- Conditional advertisement.
type: list
suboptions:
advertise_routemap:
description:
- Name of advertising route map. Source router.route-map.name.
type: str
condition_routemap:
description:
- Name of condition route map. Source router.route-map.name.
type: str
condition_type:
description:
- Type of condition.
type: str
choices:
- exist
- non-exist
connect_timer:
description:
- Interval (sec) for connect timer.
type: int
default_originate_routemap:
description:
- Route map to specify criteria to originate IPv4 default. Source router.route-map.name.
type: str
default_originate_routemap6:
description:
- Route map to specify criteria to originate IPv6 default. Source router.route-map.name.
type: str
description:
description:
- Description.
type: str
distribute_list_in:
description:
- Filter for IPv4 updates from this neighbor. Source router.access-list.name.
type: str
distribute_list_in6:
description:
- Filter for IPv6 updates from this neighbor. Source router.access-list6.name.
type: str
distribute_list_out:
description:
- Filter for IPv4 updates to this neighbor. Source router.access-list.name.
type: str
distribute_list_out6:
description:
- Filter for IPv6 updates to this neighbor. Source router.access-list6.name.
type: str
dont_capability_negotiate:
description:
- Don't negotiate capabilities with this neighbor
type: str
choices:
- enable
- disable
ebgp_enforce_multihop:
description:
- Enable/disable allow multi-hop EBGP neighbors.
type: str
choices:
- enable
- disable
ebgp_multihop_ttl:
description:
- EBGP multihop TTL for this peer.
type: int
filter_list_in:
description:
- BGP filter for IPv4 inbound routes. Source router.aspath-list.name.
type: str
filter_list_in6:
description:
- BGP filter for IPv6 inbound routes. Source router.aspath-list.name.
type: str
filter_list_out:
description:
- BGP filter for IPv4 outbound routes. Source router.aspath-list.name.
type: str
filter_list_out6:
description:
- BGP filter for IPv6 outbound routes. Source router.aspath-list.name.
type: str
holdtime_timer:
description:
- Interval (sec) before peer considered dead.
type: int
interface:
description:
- Interface Source system.interface.name.
type: str
ip:
description:
- IP/IPv6 address of neighbor.
required: true
type: str
keep_alive_timer:
description:
- Keep alive timer interval (sec).
type: int
link_down_failover:
description:
- Enable/disable failover upon link down.
type: str
choices:
- enable
- disable
local_as:
description:
- Local AS number of neighbor.
type: int
local_as_no_prepend:
description:
- Do not prepend local-as to incoming updates.
type: str
choices:
- enable
- disable
local_as_replace_as:
description:
- Replace real AS with local-as in outgoing updates.
type: str
choices:
- enable
- disable
maximum_prefix:
description:
- Maximum number of IPv4 prefixes to accept from this peer.
type: int
maximum_prefix_threshold:
description:
- Maximum IPv4 prefix threshold value (1 - 100 percent).
type: int
maximum_prefix_threshold6:
description:
- Maximum IPv6 prefix threshold value (1 - 100 percent).
type: int
maximum_prefix_warning_only:
description:
- Enable/disable IPv4 Only give warning message when limit is exceeded.
type: str
choices:
- enable
- disable
maximum_prefix_warning_only6:
description:
- Enable/disable IPv6 Only give warning message when limit is exceeded.
type: str
choices:
- enable
- disable
maximum_prefix6:
description:
- Maximum number of IPv6 prefixes to accept from this peer.
type: int
next_hop_self:
description:
- Enable/disable IPv4 next-hop calculation for this neighbor.
type: str
choices:
- enable
- disable
next_hop_self6:
description:
- Enable/disable IPv6 next-hop calculation for this neighbor.
type: str
choices:
- enable
- disable
override_capability:
description:
- Enable/disable override result of capability negotiation.
type: str
choices:
- enable
- disable
passive:
description:
- Enable/disable sending of open messages to this neighbor.
type: str
choices:
- enable
- disable
password:
description:
- Password used in MD5 authentication.
type: str
prefix_list_in:
description:
- IPv4 Inbound filter for updates from this neighbor. Source router.prefix-list.name.
type: str
prefix_list_in6:
description:
- IPv6 Inbound filter for updates from this neighbor. Source router.prefix-list6.name.
type: str
prefix_list_out:
description:
- IPv4 Outbound filter for updates to this neighbor. Source router.prefix-list.name.
type: str
prefix_list_out6:
description:
- IPv6 Outbound filter for updates to this neighbor. Source router.prefix-list6.name.
type: str
remote_as:
description:
- AS number of neighbor.
type: int
remove_private_as:
description:
- Enable/disable remove private AS number from IPv4 outbound updates.
type: str
choices:
- enable
- disable
remove_private_as6:
description:
- Enable/disable remove private AS number from IPv6 outbound updates.
type: str
choices:
- enable
- disable
restart_time:
description:
- Graceful restart delay time (sec, 0 = global default).
type: int
retain_stale_time:
description:
- Time to retain stale routes.
type: int
route_map_in:
description:
- IPv4 Inbound route map filter. Source router.route-map.name.
type: str
route_map_in6:
description:
- IPv6 Inbound route map filter. Source router.route-map.name.
type: str
route_map_out:
description:
- IPv4 Outbound route map filter. Source router.route-map.name.
type: str
route_map_out6:
description:
- IPv6 Outbound route map filter. Source router.route-map.name.
type: str
route_reflector_client:
description:
- Enable/disable IPv4 AS route reflector client.
type: str
choices:
- enable
- disable
route_reflector_client6:
description:
- Enable/disable IPv6 AS route reflector client.
type: str
choices:
- enable
- disable
route_server_client:
description:
- Enable/disable IPv4 AS route server client.
type: str
choices:
- enable
- disable
route_server_client6:
description:
- Enable/disable IPv6 AS route server client.
type: str
choices:
- enable
- disable
send_community:
description:
- IPv4 Send community attribute to neighbor.
type: str
choices:
- standard
- extended
- both
- disable
send_community6:
description:
- IPv6 Send community attribute to neighbor.
type: str
choices:
- standard
- extended
- both
- disable
shutdown:
description:
- Enable/disable shutdown this neighbor.
type: str
choices:
- enable
- disable
soft_reconfiguration:
description:
- Enable/disable allow IPv4 inbound soft reconfiguration.
type: str
choices:
- enable
- disable
soft_reconfiguration6:
description:
- Enable/disable allow IPv6 inbound soft reconfiguration.
type: str
choices:
- enable
- disable
stale_route:
description:
- Enable/disable stale route after neighbor down.
type: str
choices:
- enable
- disable
strict_capability_match:
description:
- Enable/disable strict capability matching.
type: str
choices:
- enable
- disable
unsuppress_map:
description:
- IPv4 Route map to selectively unsuppress suppressed routes. Source router.route-map.name.
type: str
unsuppress_map6:
description:
- IPv6 Route map to selectively unsuppress suppressed routes. Source router.route-map.name.
type: str
update_source:
description:
- Interface to use as source IP/IPv6 address of TCP connections. Source system.interface.name.
type: str
weight:
description:
- Neighbor weight.
type: int
neighbor_group:
description:
- BGP neighbor group table.
type: list
suboptions:
activate:
description:
- Enable/disable address family IPv4 for this neighbor.
type: str
choices:
- enable
- disable
activate6:
description:
- Enable/disable address family IPv6 for this neighbor.
type: str
choices:
- enable
- disable
advertisement_interval:
description:
- Minimum interval (sec) between sending updates.
type: int
allowas_in:
description:
- IPv4 The maximum number of occurrence of my AS number allowed.
type: int
allowas_in_enable:
description:
- Enable/disable IPv4 Enable to allow my AS in AS path.
type: str
choices:
- enable
- disable
allowas_in_enable6:
description:
- Enable/disable IPv6 Enable to allow my AS in AS path.
type: str
choices:
- enable
- disable
allowas_in6:
description:
- IPv6 The maximum number of occurrence of my AS number allowed.
type: int
as_override:
description:
- Enable/disable replace peer AS with own AS for IPv4.
type: str
choices:
- enable
- disable
as_override6:
description:
- Enable/disable replace peer AS with own AS for IPv6.
type: str
choices:
- enable
- disable
attribute_unchanged:
description:
- IPv4 List of attributes that should be unchanged.
type: str
choices:
- as-path
- med
- next-hop
attribute_unchanged6:
description:
- IPv6 List of attributes that should be unchanged.
type: str
choices:
- as-path
- med
- next-hop
bfd:
description:
- Enable/disable BFD for this neighbor.
type: str
choices:
- enable
- disable
capability_default_originate:
description:
- Enable/disable advertise default IPv4 route to this neighbor.
type: str
choices:
- enable
- disable
capability_default_originate6:
description:
- Enable/disable advertise default IPv6 route to this neighbor.
type: str
choices:
- enable
- disable
capability_dynamic:
description:
- Enable/disable advertise dynamic capability to this neighbor.
type: str
choices:
- enable
- disable
capability_graceful_restart:
description:
- Enable/disable advertise IPv4 graceful restart capability to this neighbor.
type: str
choices:
- enable
- disable
capability_graceful_restart6:
description:
- Enable/disable advertise IPv6 graceful restart capability to this neighbor.
type: str
choices:
- enable
- disable
capability_orf:
description:
- Accept/Send IPv4 ORF lists to/from this neighbor.
type: str
choices:
- none
- receive
- send
- both
capability_orf6:
description:
- Accept/Send IPv6 ORF lists to/from this neighbor.
type: str
choices:
- none
- receive
- send
- both
capability_route_refresh:
description:
- Enable/disable advertise route refresh capability to this neighbor.
type: str
choices:
- enable
- disable
connect_timer:
description:
- Interval (sec) for connect timer.
type: int
default_originate_routemap:
description:
- Route map to specify criteria to originate IPv4 default. Source router.route-map.name.
type: str
default_originate_routemap6:
description:
- Route map to specify criteria to originate IPv6 default. Source router.route-map.name.
type: str
description:
description:
- Description.
type: str
distribute_list_in:
description:
- Filter for IPv4 updates from this neighbor. Source router.access-list.name.
type: str
distribute_list_in6:
description:
- Filter for IPv6 updates from this neighbor. Source router.access-list6.name.
type: str
distribute_list_out:
description:
- Filter for IPv4 updates to this neighbor. Source router.access-list.name.
type: str
distribute_list_out6:
description:
- Filter for IPv6 updates to this neighbor. Source router.access-list6.name.
type: str
dont_capability_negotiate:
description:
- Don't negotiate capabilities with this neighbor
type: str
choices:
- enable
- disable
ebgp_enforce_multihop:
description:
- Enable/disable allow multi-hop EBGP neighbors.
type: str
choices:
- enable
- disable
ebgp_multihop_ttl:
description:
- EBGP multihop TTL for this peer.
type: int
filter_list_in:
description:
- BGP filter for IPv4 inbound routes. Source router.aspath-list.name.
type: str
filter_list_in6:
description:
- BGP filter for IPv6 inbound routes. Source router.aspath-list.name.
type: str
filter_list_out:
description:
- BGP filter for IPv4 outbound routes. Source router.aspath-list.name.
type: str
filter_list_out6:
description:
- BGP filter for IPv6 outbound routes. Source router.aspath-list.name.
type: str
holdtime_timer:
description:
- Interval (sec) before peer considered dead.
type: int
interface:
description:
- Interface Source system.interface.name.
type: str
keep_alive_timer:
description:
- Keep alive timer interval (sec).
type: int
link_down_failover:
description:
- Enable/disable failover upon link down.
type: str
choices:
- enable
- disable
local_as:
description:
- Local AS number of neighbor.
type: int
local_as_no_prepend:
description:
- Do not prepend local-as to incoming updates.
type: str
choices:
- enable
- disable
local_as_replace_as:
description:
- Replace real AS with local-as in outgoing updates.
type: str
choices:
- enable
- disable
maximum_prefix:
description:
- Maximum number of IPv4 prefixes to accept from this peer.
type: int
maximum_prefix_threshold:
description:
- Maximum IPv4 prefix threshold value (1 - 100 percent).
type: int
maximum_prefix_threshold6:
description:
- Maximum IPv6 prefix threshold value (1 - 100 percent).
type: int
maximum_prefix_warning_only:
description:
- Enable/disable IPv4 Only give warning message when limit is exceeded.
type: str
choices:
- enable
- disable
maximum_prefix_warning_only6:
description:
- Enable/disable IPv6 Only give warning message when limit is exceeded.
type: str
choices:
- enable
- disable
maximum_prefix6:
description:
- Maximum number of IPv6 prefixes to accept from this peer.
type: int
name:
description:
- Neighbor group name.
required: true
type: str
next_hop_self:
description:
- Enable/disable IPv4 next-hop calculation for this neighbor.
type: str
choices:
- enable
- disable
next_hop_self6:
description:
- Enable/disable IPv6 next-hop calculation for this neighbor.
type: str
choices:
- enable
- disable
override_capability:
description:
- Enable/disable override result of capability negotiation.
type: str
choices:
- enable
- disable
passive:
description:
- Enable/disable sending of open messages to this neighbor.
type: str
choices:
- enable
- disable
prefix_list_in:
description:
- IPv4 Inbound filter for updates from this neighbor. Source router.prefix-list.name.
type: str
prefix_list_in6:
description:
- IPv6 Inbound filter for updates from this neighbor. Source router.prefix-list6.name.
type: str
prefix_list_out:
description:
- IPv4 Outbound filter for updates to this neighbor. Source router.prefix-list.name.
type: str
prefix_list_out6:
description:
- IPv6 Outbound filter for updates to this neighbor. Source router.prefix-list6.name.
type: str
remote_as:
description:
- AS number of neighbor.
type: int
remove_private_as:
description:
- Enable/disable remove private AS number from IPv4 outbound updates.
type: str
choices:
- enable
- disable
remove_private_as6:
description:
- Enable/disable remove private AS number from IPv6 outbound updates.
type: str
choices:
- enable
- disable
restart_time:
description:
- Graceful restart delay time (sec, 0 = global default).
type: int
retain_stale_time:
description:
- Time to retain stale routes.
type: int
route_map_in:
description:
- IPv4 Inbound route map filter. Source router.route-map.name.
type: str
route_map_in6:
description:
- IPv6 Inbound route map filter. Source router.route-map.name.
type: str
route_map_out:
description:
- IPv4 Outbound route map filter. Source router.route-map.name.
type: str
route_map_out6:
description:
- IPv6 Outbound route map filter. Source router.route-map.name.
type: str
route_reflector_client:
description:
- Enable/disable IPv4 AS route reflector client.
type: str
choices:
- enable
- disable
route_reflector_client6:
description:
- Enable/disable IPv6 AS route reflector client.
type: str
choices:
- enable
- disable
route_server_client:
description:
- Enable/disable IPv4 AS route server client.
type: str
choices:
- enable
- disable
route_server_client6:
description:
- Enable/disable IPv6 AS route server client.
type: str
choices:
- enable
- disable
send_community:
description:
- IPv4 Send community attribute to neighbor.
type: str
choices:
- standard
- extended
- both
- disable
send_community6:
description:
- IPv6 Send community attribute to neighbor.
type: str
choices:
- standard
- extended
- both
- disable
shutdown:
description:
- Enable/disable shutdown this neighbor.
type: str
choices:
- enable
- disable
soft_reconfiguration:
description:
- Enable/disable allow IPv4 inbound soft reconfiguration.
type: str
choices:
- enable
- disable
soft_reconfiguration6:
description:
- Enable/disable allow IPv6 inbound soft reconfiguration.
type: str
choices:
- enable
- disable
stale_route:
description:
- Enable/disable stale route after neighbor down.
type: str
choices:
- enable
- disable
strict_capability_match:
description:
- Enable/disable strict capability matching.
type: str
choices:
- enable
- disable
unsuppress_map:
description:
- IPv4 Route map to selectively unsuppress suppressed routes. Source router.route-map.name.
type: str
unsuppress_map6:
description:
- IPv6 Route map to selectively unsuppress suppressed routes. Source router.route-map.name.
type: str
update_source:
description:
- Interface to use as source IP/IPv6 address of TCP connections. Source system.interface.name.
type: str
weight:
description:
- Neighbor weight.
type: int
neighbor_range:
description:
- BGP neighbor range table.
type: list
suboptions:
id:
description:
- Neighbor range ID.
required: true
type: int
max_neighbor_num:
description:
- Maximum number of neighbors.
type: int
neighbor_group:
description:
- Neighbor group name. Source router.bgp.neighbor-group.name.
type: str
prefix:
description:
- Neighbor range prefix.
type: str
neighbor_range6:
description:
- BGP IPv6 neighbor range table.
type: list
suboptions:
id:
description:
- IPv6 neighbor range ID.
required: true
type: int
max_neighbor_num:
description:
- Maximum number of neighbors.
type: int
neighbor_group:
description:
- Neighbor group name. Source router.bgp.neighbor-group.name.
type: str
prefix6:
description:
- IPv6 prefix.
type: str
network:
description:
- BGP network table.
type: list
suboptions:
backdoor:
description:
- Enable/disable route as backdoor.
type: str
choices:
- enable
- disable
id:
description:
- ID.
required: true
type: int
prefix:
description:
- Network prefix.
type: str
route_map:
description:
- Route map to modify generated route. Source router.route-map.name.
type: str
network_import_check:
description:
- Enable/disable ensure BGP network route exists in IGP.
type: str
choices:
- enable
- disable
network6:
description:
- BGP IPv6 network table.
type: list
suboptions:
backdoor:
description:
- Enable/disable route as backdoor.
type: str
choices:
- enable
- disable
id:
description:
- ID.
required: true
type: int
prefix6:
description:
- Network IPv6 prefix.
type: str
route_map:
description:
- Route map to modify generated route. Source router.route-map.name.
type: str
redistribute:
description:
- BGP IPv4 redistribute table.
type: list
suboptions:
name:
description:
- Distribute list entry name.
required: true
type: str
route_map:
description:
- Route map name. Source router.route-map.name.
type: str
status:
description:
- Status
type: str
choices:
- enable
- disable
redistribute6:
description:
- BGP IPv6 redistribute table.
type: list
suboptions:
name:
description:
- Distribute list entry name.
required: true
type: str
route_map:
description:
- Route map name. Source router.route-map.name.
type: str
status:
description:
- Status
type: str
choices:
- enable
- disable
router_id:
description:
- Router ID.
type: str
scan_time:
description:
- Background scanner interval (sec), 0 to disable it.
type: int
synchronization:
description:
- Enable/disable only advertise routes from iBGP if routes present in an IGP.
type: str
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure BGP.
fortios_router_bgp:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
router_bgp:
admin_distance:
-
distance: "4"
id: "5"
neighbour_prefix: "<your_own_value>"
route_list: "<your_own_value> (source router.access-list.name)"
aggregate_address:
-
as_set: "enable"
id: "10"
prefix: "<your_own_value>"
summary_only: "enable"
aggregate_address6:
-
as_set: "enable"
id: "15"
prefix6: "<your_own_value>"
summary_only: "enable"
always_compare_med: "enable"
as: "19"
bestpath_as_path_ignore: "enable"
bestpath_cmp_confed_aspath: "enable"
bestpath_cmp_routerid: "enable"
bestpath_med_confed: "enable"
bestpath_med_missing_as_worst: "enable"
client_to_client_reflection: "enable"
cluster_id: "<your_own_value>"
confederation_identifier: "27"
confederation_peers:
-
peer: "<your_own_value>"
dampening: "enable"
dampening_max_suppress_time: "31"
dampening_reachability_half_life: "32"
dampening_reuse: "33"
dampening_route_map: "<your_own_value> (source router.route-map.name)"
dampening_suppress: "35"
dampening_unreachability_half_life: "36"
default_local_preference: "37"
deterministic_med: "enable"
distance_external: "39"
distance_internal: "40"
distance_local: "41"
ebgp_multipath: "enable"
enforce_first_as: "enable"
fast_external_failover: "enable"
graceful_end_on_timer: "enable"
graceful_restart: "enable"
graceful_restart_time: "47"
graceful_stalepath_time: "48"
graceful_update_delay: "49"
holdtime_timer: "50"
ibgp_multipath: "enable"
ignore_optional_capability: "enable"
keepalive_timer: "53"
log_neighbour_changes: "enable"
neighbor:
-
activate: "enable"
activate6: "enable"
advertisement_interval: "58"
allowas_in: "59"
allowas_in_enable: "enable"
allowas_in_enable6: "enable"
allowas_in6: "62"
as_override: "enable"
as_override6: "enable"
attribute_unchanged: "as-path"
attribute_unchanged6: "as-path"
bfd: "enable"
capability_default_originate: "enable"
capability_default_originate6: "enable"
capability_dynamic: "enable"
capability_graceful_restart: "enable"
capability_graceful_restart6: "enable"
capability_orf: "none"
capability_orf6: "none"
capability_route_refresh: "enable"
conditional_advertise:
-
advertise_routemap: "<your_own_value> (source router.route-map.name)"
condition_routemap: "<your_own_value> (source router.route-map.name)"
condition_type: "exist"
connect_timer: "80"
default_originate_routemap: "<your_own_value> (source router.route-map.name)"
default_originate_routemap6: "<your_own_value> (source router.route-map.name)"
description: "<your_own_value>"
distribute_list_in: "<your_own_value> (source router.access-list.name)"
distribute_list_in6: "<your_own_value> (source router.access-list6.name)"
distribute_list_out: "<your_own_value> (source router.access-list.name)"
distribute_list_out6: "<your_own_value> (source router.access-list6.name)"
dont_capability_negotiate: "enable"
ebgp_enforce_multihop: "enable"
ebgp_multihop_ttl: "90"
filter_list_in: "<your_own_value> (source router.aspath-list.name)"
filter_list_in6: "<your_own_value> (source router.aspath-list.name)"
filter_list_out: "<your_own_value> (source router.aspath-list.name)"
filter_list_out6: "<your_own_value> (source router.aspath-list.name)"
holdtime_timer: "95"
interface: "<your_own_value> (source system.interface.name)"
ip: "<your_own_value>"
keep_alive_timer: "98"
link_down_failover: "enable"
local_as: "100"
local_as_no_prepend: "enable"
local_as_replace_as: "enable"
maximum_prefix: "103"
maximum_prefix_threshold: "104"
maximum_prefix_threshold6: "105"
maximum_prefix_warning_only: "enable"
maximum_prefix_warning_only6: "enable"
maximum_prefix6: "108"
next_hop_self: "enable"
next_hop_self6: "enable"
override_capability: "enable"
passive: "enable"
password: "<your_own_value>"
prefix_list_in: "<your_own_value> (source router.prefix-list.name)"
prefix_list_in6: "<your_own_value> (source router.prefix-list6.name)"
prefix_list_out: "<your_own_value> (source router.prefix-list.name)"
prefix_list_out6: "<your_own_value> (source router.prefix-list6.name)"
remote_as: "118"
remove_private_as: "enable"
remove_private_as6: "enable"
restart_time: "121"
retain_stale_time: "122"
route_map_in: "<your_own_value> (source router.route-map.name)"
route_map_in6: "<your_own_value> (source router.route-map.name)"
route_map_out: "<your_own_value> (source router.route-map.name)"
route_map_out6: "<your_own_value> (source router.route-map.name)"
route_reflector_client: "enable"
route_reflector_client6: "enable"
route_server_client: "enable"
route_server_client6: "enable"
send_community: "standard"
send_community6: "standard"
shutdown: "enable"
soft_reconfiguration: "enable"
soft_reconfiguration6: "enable"
stale_route: "enable"
strict_capability_match: "enable"
unsuppress_map: "<your_own_value> (source router.route-map.name)"
unsuppress_map6: "<your_own_value> (source router.route-map.name)"
update_source: "<your_own_value> (source system.interface.name)"
weight: "141"
neighbor_group:
-
activate: "enable"
activate6: "enable"
advertisement_interval: "145"
allowas_in: "146"
allowas_in_enable: "enable"
allowas_in_enable6: "enable"
allowas_in6: "149"
as_override: "enable"
as_override6: "enable"
attribute_unchanged: "as-path"
attribute_unchanged6: "as-path"
bfd: "enable"
capability_default_originate: "enable"
capability_default_originate6: "enable"
capability_dynamic: "enable"
capability_graceful_restart: "enable"
capability_graceful_restart6: "enable"
capability_orf: "none"
capability_orf6: "none"
capability_route_refresh: "enable"
connect_timer: "163"
default_originate_routemap: "<your_own_value> (source router.route-map.name)"
default_originate_routemap6: "<your_own_value> (source router.route-map.name)"
description: "<your_own_value>"
distribute_list_in: "<your_own_value> (source router.access-list.name)"
distribute_list_in6: "<your_own_value> (source router.access-list6.name)"
distribute_list_out: "<your_own_value> (source router.access-list.name)"
distribute_list_out6: "<your_own_value> (source router.access-list6.name)"
dont_capability_negotiate: "enable"
ebgp_enforce_multihop: "enable"
ebgp_multihop_ttl: "173"
filter_list_in: "<your_own_value> (source router.aspath-list.name)"
filter_list_in6: "<your_own_value> (source router.aspath-list.name)"
filter_list_out: "<your_own_value> (source router.aspath-list.name)"
filter_list_out6: "<your_own_value> (source router.aspath-list.name)"
holdtime_timer: "178"
interface: "<your_own_value> (source system.interface.name)"
keep_alive_timer: "180"
link_down_failover: "enable"
local_as: "182"
local_as_no_prepend: "enable"
local_as_replace_as: "enable"
maximum_prefix: "185"
maximum_prefix_threshold: "186"
maximum_prefix_threshold6: "187"
maximum_prefix_warning_only: "enable"
maximum_prefix_warning_only6: "enable"
maximum_prefix6: "190"
name: "default_name_191"
next_hop_self: "enable"
next_hop_self6: "enable"
override_capability: "enable"
passive: "enable"
prefix_list_in: "<your_own_value> (source router.prefix-list.name)"
prefix_list_in6: "<your_own_value> (source router.prefix-list6.name)"
prefix_list_out: "<your_own_value> (source router.prefix-list.name)"
prefix_list_out6: "<your_own_value> (source router.prefix-list6.name)"
remote_as: "200"
remove_private_as: "enable"
remove_private_as6: "enable"
restart_time: "203"
retain_stale_time: "204"
route_map_in: "<your_own_value> (source router.route-map.name)"
route_map_in6: "<your_own_value> (source router.route-map.name)"
route_map_out: "<your_own_value> (source router.route-map.name)"
route_map_out6: "<your_own_value> (source router.route-map.name)"
route_reflector_client: "enable"
route_reflector_client6: "enable"
route_server_client: "enable"
route_server_client6: "enable"
send_community: "standard"
send_community6: "standard"
shutdown: "enable"
soft_reconfiguration: "enable"
soft_reconfiguration6: "enable"
stale_route: "enable"
strict_capability_match: "enable"
unsuppress_map: "<your_own_value> (source router.route-map.name)"
unsuppress_map6: "<your_own_value> (source router.route-map.name)"
update_source: "<your_own_value> (source system.interface.name)"
weight: "223"
neighbor_range:
-
id: "225"
max_neighbor_num: "226"
neighbor_group: "<your_own_value> (source router.bgp.neighbor-group.name)"
prefix: "<your_own_value>"
neighbor_range6:
-
id: "230"
max_neighbor_num: "231"
neighbor_group: "<your_own_value> (source router.bgp.neighbor-group.name)"
prefix6: "<your_own_value>"
network:
-
backdoor: "enable"
id: "236"
prefix: "<your_own_value>"
route_map: "<your_own_value> (source router.route-map.name)"
network_import_check: "enable"
network6:
-
backdoor: "enable"
id: "242"
prefix6: "<your_own_value>"
route_map: "<your_own_value> (source router.route-map.name)"
redistribute:
-
name: "default_name_246"
route_map: "<your_own_value> (source router.route-map.name)"
status: "enable"
redistribute6:
-
name: "default_name_250"
route_map: "<your_own_value> (source router.route-map.name)"
status: "enable"
router_id: "<your_own_value>"
scan_time: "254"
synchronization: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_router_bgp_data(json):
option_list = ['admin_distance', 'aggregate_address', 'aggregate_address6',
'always_compare_med', 'as', 'bestpath_as_path_ignore',
'bestpath_cmp_confed_aspath', 'bestpath_cmp_routerid', 'bestpath_med_confed',
'bestpath_med_missing_as_worst', 'client_to_client_reflection', 'cluster_id',
'confederation_identifier', 'confederation_peers', 'dampening',
'dampening_max_suppress_time', 'dampening_reachability_half_life', 'dampening_reuse',
'dampening_route_map', 'dampening_suppress', 'dampening_unreachability_half_life',
'default_local_preference', 'deterministic_med', 'distance_external',
'distance_internal', 'distance_local', 'ebgp_multipath',
'enforce_first_as', 'fast_external_failover', 'graceful_end_on_timer',
'graceful_restart', 'graceful_restart_time', 'graceful_stalepath_time',
'graceful_update_delay', 'holdtime_timer', 'ibgp_multipath',
'ignore_optional_capability', 'keepalive_timer', 'log_neighbour_changes',
'neighbor', 'neighbor_group', 'neighbor_range',
'neighbor_range6', 'network', 'network_import_check',
'network6', 'redistribute', 'redistribute6',
'router_id', 'scan_time', 'synchronization']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
    if isinstance(data, list):
        # Write converted elements back by index; rebinding the loop variable
        # alone would leave the original list unchanged.
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
    elif isinstance(data, dict):
        new_data = {}
        for k, v in data.items():
            new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
        data = new_data
    return data
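# Illustrative example of the key conversion performed above (not part of the module itself):
#   underscore_to_hyphen({'graceful_restart_time': 120, 'neighbor': [{'remote_as': 65001}]})
#   -> {'graceful-restart-time': 120, 'neighbor': [{'remote-as': 65001}]}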
def router_bgp(data, fos):
vdom = data['vdom']
router_bgp_data = data['router_bgp']
filtered_data = underscore_to_hyphen(filter_router_bgp_data(router_bgp_data))
return fos.set('router',
'bgp',
data=filtered_data,
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_router(data, fos):
if data['router_bgp']:
resp = router_bgp(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"router_bgp": {
"required": False, "type": "dict", "default": None,
"options": {
"admin_distance": {"required": False, "type": "list",
"options": {
"distance": {"required": False, "type": "int"},
"id": {"required": True, "type": "int"},
"neighbour_prefix": {"required": False, "type": "str"},
"route_list": {"required": False, "type": "str"}
}},
"aggregate_address": {"required": False, "type": "list",
"options": {
"as_set": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"id": {"required": True, "type": "int"},
"prefix": {"required": False, "type": "str"},
"summary_only": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}},
"aggregate_address6": {"required": False, "type": "list",
"options": {
"as_set": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"id": {"required": True, "type": "int"},
"prefix6": {"required": False, "type": "str"},
"summary_only": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}},
"always_compare_med": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"as": {"required": False, "type": "int"},
"bestpath_as_path_ignore": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"bestpath_cmp_confed_aspath": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"bestpath_cmp_routerid": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"bestpath_med_confed": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"bestpath_med_missing_as_worst": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"client_to_client_reflection": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"cluster_id": {"required": False, "type": "str"},
"confederation_identifier": {"required": False, "type": "int"},
"confederation_peers": {"required": False, "type": "list",
"options": {
"peer": {"required": True, "type": "str"}
}},
"dampening": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"dampening_max_suppress_time": {"required": False, "type": "int"},
"dampening_reachability_half_life": {"required": False, "type": "int"},
"dampening_reuse": {"required": False, "type": "int"},
"dampening_route_map": {"required": False, "type": "str"},
"dampening_suppress": {"required": False, "type": "int"},
"dampening_unreachability_half_life": {"required": False, "type": "int"},
"default_local_preference": {"required": False, "type": "int"},
"deterministic_med": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"distance_external": {"required": False, "type": "int"},
"distance_internal": {"required": False, "type": "int"},
"distance_local": {"required": False, "type": "int"},
"ebgp_multipath": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"enforce_first_as": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"fast_external_failover": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"graceful_end_on_timer": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"graceful_restart": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"graceful_restart_time": {"required": False, "type": "int"},
"graceful_stalepath_time": {"required": False, "type": "int"},
"graceful_update_delay": {"required": False, "type": "int"},
"holdtime_timer": {"required": False, "type": "int"},
"ibgp_multipath": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ignore_optional_capability": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"keepalive_timer": {"required": False, "type": "int"},
"log_neighbour_changes": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"neighbor": {"required": False, "type": "list",
"options": {
"activate": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"activate6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"advertisement_interval": {"required": False, "type": "int"},
"allowas_in": {"required": False, "type": "int"},
"allowas_in_enable": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"allowas_in_enable6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"allowas_in6": {"required": False, "type": "int"},
"as_override": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"as_override6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"attribute_unchanged": {"required": False, "type": "str",
"choices": ["as-path", "med", "next-hop"]},
"attribute_unchanged6": {"required": False, "type": "str",
"choices": ["as-path", "med", "next-hop"]},
"bfd": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"capability_default_originate": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"capability_default_originate6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"capability_dynamic": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"capability_graceful_restart": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"capability_graceful_restart6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"capability_orf": {"required": False, "type": "str",
"choices": ["none", "receive", "send",
"both"]},
"capability_orf6": {"required": False, "type": "str",
"choices": ["none", "receive", "send",
"both"]},
"capability_route_refresh": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"conditional_advertise": {"required": False, "type": "list",
"options": {
"advertise_routemap": {"required": False, "type": "str"},
"condition_routemap": {"required": False, "type": "str"},
"condition_type": {"required": False, "type": "str",
"choices": ["exist", "non-exist"]}
}},
"connect_timer": {"required": False, "type": "int"},
"default_originate_routemap": {"required": False, "type": "str"},
"default_originate_routemap6": {"required": False, "type": "str"},
"description": {"required": False, "type": "str"},
"distribute_list_in": {"required": False, "type": "str"},
"distribute_list_in6": {"required": False, "type": "str"},
"distribute_list_out": {"required": False, "type": "str"},
"distribute_list_out6": {"required": False, "type": "str"},
"dont_capability_negotiate": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ebgp_enforce_multihop": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ebgp_multihop_ttl": {"required": False, "type": "int"},
"filter_list_in": {"required": False, "type": "str"},
"filter_list_in6": {"required": False, "type": "str"},
"filter_list_out": {"required": False, "type": "str"},
"filter_list_out6": {"required": False, "type": "str"},
"holdtime_timer": {"required": False, "type": "int"},
"interface": {"required": False, "type": "str"},
"ip": {"required": True, "type": "str"},
"keep_alive_timer": {"required": False, "type": "int"},
"link_down_failover": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"local_as": {"required": False, "type": "int"},
"local_as_no_prepend": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"local_as_replace_as": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"maximum_prefix": {"required": False, "type": "int"},
"maximum_prefix_threshold": {"required": False, "type": "int"},
"maximum_prefix_threshold6": {"required": False, "type": "int"},
"maximum_prefix_warning_only": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"maximum_prefix_warning_only6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"maximum_prefix6": {"required": False, "type": "int"},
"next_hop_self": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"next_hop_self6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"override_capability": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"passive": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"password": {"required": False, "type": "str"},
"prefix_list_in": {"required": False, "type": "str"},
"prefix_list_in6": {"required": False, "type": "str"},
"prefix_list_out": {"required": False, "type": "str"},
"prefix_list_out6": {"required": False, "type": "str"},
"remote_as": {"required": False, "type": "int"},
"remove_private_as": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"remove_private_as6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"restart_time": {"required": False, "type": "int"},
"retain_stale_time": {"required": False, "type": "int"},
"route_map_in": {"required": False, "type": "str"},
"route_map_in6": {"required": False, "type": "str"},
"route_map_out": {"required": False, "type": "str"},
"route_map_out6": {"required": False, "type": "str"},
"route_reflector_client": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"route_reflector_client6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"route_server_client": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"route_server_client6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"send_community": {"required": False, "type": "str",
"choices": ["standard", "extended", "both",
"disable"]},
"send_community6": {"required": False, "type": "str",
"choices": ["standard", "extended", "both",
"disable"]},
"shutdown": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"soft_reconfiguration": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"soft_reconfiguration6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"stale_route": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"strict_capability_match": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"unsuppress_map": {"required": False, "type": "str"},
"unsuppress_map6": {"required": False, "type": "str"},
"update_source": {"required": False, "type": "str"},
"weight": {"required": False, "type": "int"}
}},
"neighbor_group": {"required": False, "type": "list",
"options": {
"activate": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"activate6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"advertisement_interval": {"required": False, "type": "int"},
"allowas_in": {"required": False, "type": "int"},
"allowas_in_enable": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"allowas_in_enable6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"allowas_in6": {"required": False, "type": "int"},
"as_override": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"as_override6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"attribute_unchanged": {"required": False, "type": "str",
"choices": ["as-path", "med", "next-hop"]},
"attribute_unchanged6": {"required": False, "type": "str",
"choices": ["as-path", "med", "next-hop"]},
"bfd": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"capability_default_originate": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"capability_default_originate6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"capability_dynamic": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"capability_graceful_restart": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"capability_graceful_restart6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"capability_orf": {"required": False, "type": "str",
"choices": ["none", "receive", "send",
"both"]},
"capability_orf6": {"required": False, "type": "str",
"choices": ["none", "receive", "send",
"both"]},
"capability_route_refresh": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"connect_timer": {"required": False, "type": "int"},
"default_originate_routemap": {"required": False, "type": "str"},
"default_originate_routemap6": {"required": False, "type": "str"},
"description": {"required": False, "type": "str"},
"distribute_list_in": {"required": False, "type": "str"},
"distribute_list_in6": {"required": False, "type": "str"},
"distribute_list_out": {"required": False, "type": "str"},
"distribute_list_out6": {"required": False, "type": "str"},
"dont_capability_negotiate": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ebgp_enforce_multihop": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ebgp_multihop_ttl": {"required": False, "type": "int"},
"filter_list_in": {"required": False, "type": "str"},
"filter_list_in6": {"required": False, "type": "str"},
"filter_list_out": {"required": False, "type": "str"},
"filter_list_out6": {"required": False, "type": "str"},
"holdtime_timer": {"required": False, "type": "int"},
"interface": {"required": False, "type": "str"},
"keep_alive_timer": {"required": False, "type": "int"},
"link_down_failover": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"local_as": {"required": False, "type": "int"},
"local_as_no_prepend": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"local_as_replace_as": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"maximum_prefix": {"required": False, "type": "int"},
"maximum_prefix_threshold": {"required": False, "type": "int"},
"maximum_prefix_threshold6": {"required": False, "type": "int"},
"maximum_prefix_warning_only": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"maximum_prefix_warning_only6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"maximum_prefix6": {"required": False, "type": "int"},
"name": {"required": True, "type": "str"},
"next_hop_self": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"next_hop_self6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"override_capability": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"passive": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"prefix_list_in": {"required": False, "type": "str"},
"prefix_list_in6": {"required": False, "type": "str"},
"prefix_list_out": {"required": False, "type": "str"},
"prefix_list_out6": {"required": False, "type": "str"},
"remote_as": {"required": False, "type": "int"},
"remove_private_as": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"remove_private_as6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"restart_time": {"required": False, "type": "int"},
"retain_stale_time": {"required": False, "type": "int"},
"route_map_in": {"required": False, "type": "str"},
"route_map_in6": {"required": False, "type": "str"},
"route_map_out": {"required": False, "type": "str"},
"route_map_out6": {"required": False, "type": "str"},
"route_reflector_client": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"route_reflector_client6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"route_server_client": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"route_server_client6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"send_community": {"required": False, "type": "str",
"choices": ["standard", "extended", "both",
"disable"]},
"send_community6": {"required": False, "type": "str",
"choices": ["standard", "extended", "both",
"disable"]},
"shutdown": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"soft_reconfiguration": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"soft_reconfiguration6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"stale_route": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"strict_capability_match": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"unsuppress_map": {"required": False, "type": "str"},
"unsuppress_map6": {"required": False, "type": "str"},
"update_source": {"required": False, "type": "str"},
"weight": {"required": False, "type": "int"}
}},
"neighbor_range": {"required": False, "type": "list",
"options": {
"id": {"required": True, "type": "int"},
"max_neighbor_num": {"required": False, "type": "int"},
"neighbor_group": {"required": False, "type": "str"},
"prefix": {"required": False, "type": "str"}
}},
"neighbor_range6": {"required": False, "type": "list",
"options": {
"id": {"required": True, "type": "int"},
"max_neighbor_num": {"required": False, "type": "int"},
"neighbor_group": {"required": False, "type": "str"},
"prefix6": {"required": False, "type": "str"}
}},
"network": {"required": False, "type": "list",
"options": {
"backdoor": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"id": {"required": True, "type": "int"},
"prefix": {"required": False, "type": "str"},
"route_map": {"required": False, "type": "str"}
}},
"network_import_check": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"network6": {"required": False, "type": "list",
"options": {
"backdoor": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"id": {"required": True, "type": "int"},
"prefix6": {"required": False, "type": "str"},
"route_map": {"required": False, "type": "str"}
}},
"redistribute": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"},
"route_map": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}},
"redistribute6": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"},
"route_map": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}},
"router_id": {"required": False, "type": "str"},
"scan_time": {"required": False, "type": "int"},
"synchronization": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_router(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_router(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Test trajectory #1
==================
Create some example audio files with a trajectory in it.
"""
import os
import numpy as np
import pandas as pd
import yaml
## %
# Make the required source files for the simulated audio
mic_geometry = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
source_x = np.linspace(-10,10,5)
source_positions = np.array(np.meshgrid(source_x, source_x, source_x,)).T.reshape(-1,3)
# save the array geometry file
array_geom_file = 'array_geom.csv'
source_pos_file = 'source_pos.csv'
pd.DataFrame(mic_geometry, columns=['x','y','z']).to_csv(array_geom_file)
pd.DataFrame(source_positions, columns=['x','y','z']).to_csv(source_pos_file)
yaml_entries = {}
yaml_entries['array_geometry'] = array_geom_file
yaml_entries['source_position'] = source_pos_file
yaml_entries['sample_rate'] = 250000
yaml_entries['sim_name'] = 'batracker_simple'
with open('sim_audio_params.yaml', 'w') as f:
yaml.dump(yaml_entries, f)
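# For reference (derived from the yaml_entries above, with yaml.dump's default
# alphabetical key order), sim_audio_params.yaml should end up containing:
#   array_geometry: array_geom.csv
#   sample_rate: 250000
#   sim_name: batracker_simple
#   source_position: source_pos.csv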
## %
# Call tacost and get the audio files
stream = os.popen('bash -i make_sim_audio.sh')
output = stream.read()
output
|
#!/usr/bin/env python3
from nlptools.text.tokenizer import Segment_Rest
cfg = {'TOKENIZER':'http://127.0.0.1:8000/api/tokenize/'}
t = Segment_Rest(cfg)
#text = 'Change syntax themes, default project pages, and more in preferences.\n hello world'
text = "今天天气好不错啊"
print(t.seg(text))
|
#!/usr/bin/env python
#
# https://github.com/dbrandt/rerun
#
# A simple script that tries to recreate an approximate docker run
# command from the metadata of a running container. It's not complete
# and it's not tested in very many situations. If you want to extend it,
# please do. I'll accept any pull request within the scope.
#
# Daniel Brandt <me@dbrandt.se>
#
import sys
import shlex
import docker
usage = """Recreate an approximate docker run-command from a running container.
%s <container_id>""" % (sys.argv[0],)
def get_container_config(api_client, container_id):
c = api_client.containers(filters={"id": container_id})
config = api_client.inspect_container(c[0].get("Id")).get("Config")
return config
def construct_command(config):
cmd = "docker run --rm -it \\\n"
if config.get("Entrypoint"):
cmd += " --entrypoint %(Entrypoint)s \\\n" % config
for env in config.get("Env", []):
        key, val = env.split("=", 1)
cmd += " -e %s=%s \\\n" % (key, shlex.quote(val))
    for port, _ in (config.get("ExposedPorts") or {}).items():
port, proto = port.split("/")
if proto == "tcp":
cmd += " -p %s:%s \\\n" % (port, port)
cmd += " %(Image)s \\\n" % config
cmd += " %s\n" % (" ".join(config.get("Cmd")),)
return cmd
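# Illustrative sketch of the kind of command construct_command() builds for a
# hypothetical container config (image, env and port values are made up):
#   docker run --rm -it \
#     -e PATH=/usr/local/bin \
#     -p 80:80 \
#     nginx:latest \
#     nginx -g daemon off;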
if __name__ == "__main__":
if len(sys.argv) < 2:
print(usage)
sys.exit(1)
cid = sys.argv[1]
api_client = docker.APIClient()
config = get_container_config(api_client, cid)
cmd = construct_command(config)
print(cmd)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""osica_conejo.py"""
def osico_canibal(caballero):  # The parameter can be named anything you like
    if caballero == "Antares":
        print("Not Antares!")
    else:
        print("The snout kills Sir " + caballero)
osico_canibal("Ángel")
osico_canibal("Fergu")
osico_canibal("Antares")
|
#!/usr/bin/env python
"""
@author Jesse Haviland
"""
import numpy as np
import roboticstoolbox as rp
from spatialmath import SE3
from spatialmath import base
class RobotPlot():
def __init__(
self, robot, env, readonly, display=True,
jointaxes=True, jointlabels=False, eeframe=True, shadow=True,
name=True, options=None):
super(RobotPlot, self).__init__()
# Readonly - True for this robot is for displaying only
self.readonly = readonly
# To show to robot in the plot or not
# If not displayed, the robot is still simulated
self.display = display
self.robot = robot
self.env = env
self.ax = env.ax
# Line plot of robot links
self.links = None
# Z-axis Coordinate frame (quiver) of joints
self.joints = []
# Text of the robots name
self.name = None
        # Shadow of the line plot on the x-y plane
self.sh_links = None
# Coordinate frame of the ee (three quivers)
self.ee_axes = []
# Robot has been drawn
self.drawn = False
# Display options
self.eeframe = eeframe
self.jointaxes = jointaxes
self.jointlabels = jointlabels
self.shadow = shadow
self.showname = name
defaults = {
'robot': {'color': '#E16F6D', 'linewidth': 5},
'shadow': {'color': 'lightgrey', 'linewidth': 3},
'jointaxes': {'color': '#8FC1E2', 'linewidth': 2},
'jointlabels': {},
'jointaxislength': 0.2,
'eex': {'color': '#F84752', 'linewidth': 2}, # '#EE9494'
'eey': {'color': '#BADA55', 'linewidth': 2}, # '#93E7B0'
'eez': {'color': '#54AEFF', 'linewidth': 2},
'eelength': 0.06,
}
if options is not None:
for key, value in options.items():
defaults[key] = {**defaults[key], **options[key]}
self.options = defaults
def draw(self):
if not self.display:
return
if not self.drawn:
self.init()
return
## Update the robot links
# compute all link frames
T = self.robot.fkine_all(self.robot.q)
# draw all the line segments for the noodle plot
for i, segment in enumerate(self.segments):
linkframes = []
for link in segment:
if link is None:
linkframes.append(self.robot.base)
else:
linkframes.append(T[link.number])
points = np.array([linkframe.t for linkframe in linkframes])
            self.links[i].set_xdata(points[:, 0])
            self.links[i].set_ydata(points[:, 1])
            self.links[i].set_3d_properties(points[:, 2])
# Update the shadow of the robot links
if self.shadow:
self.sh_links[i].set_xdata(points[:, 0])
self.sh_links[i].set_ydata(points[:, 1])
self.sh_links[i].set_3d_properties(0)
## Draw the end-effector coordinate frames
# remove old ee coordinate frame
if self.eeframes:
for quiver in self.eeframes:
quiver.remove()
self.eeframes = []
if self.eeframe:
# Axes arrow transforms
            eelen = self.options['eelength']
            Tjx = SE3([eelen, 0, 0])
            Tjy = SE3([0, eelen, 0])
            Tjz = SE3([0, 0, eelen])
# add new ee coordinate frame
for link in self.robot.ee_links:
Te = T[link.number]
# ee axes arrows
Tex = Te * Tjx
Tey = Te * Tjy
Tez = Te * Tjz
xaxis = self._plot_quiver(Te.t, Tex.t, self.options['eex'])
yaxis = self._plot_quiver(Te.t, Tey.t, self.options['eey'])
zaxis = self._plot_quiver(Te.t, Tez.t, self.options['eez'])
self.eeframes.extend([xaxis, yaxis, zaxis])
## Joint axes
# remove old joint z axes
if self.joints:
for joint in self.joints:
joint.remove()
# del self.joints
self.joints = []
# add new joint axes
if self.jointaxes:
# Plot joint z coordinates
for link in self.robot:
direction = None
if isinstance(self.robot, rp.DHRobot):
# should test MDH I think
Tj = T[link.number - 1]
R = Tj.R
direction = R[:, 2] # z direction
elif link.isjoint:
Tj = T[link.number]
R = Tj.R
if link.v.axis[1] == 'z':
direction = R[:, 2] # z direction
elif link.v.axis[1] == 'y':
direction = R[:, 1] # y direction
elif link.v.axis[1] == 'x':
                        direction = R[:, 0]  # x direction
if direction is not None:
arrow = self._plot_quiver2(Tj.t, direction, link.jindex)
self.joints.extend(arrow)
def init(self):
self.drawn = True
if self.env.limits is None:
limits = np.r_[-1, 1, -1, 1, -1, 1] * self.robot.reach * 1.5
self.ax.set_xlim3d([limits[0], limits[1]])
self.ax.set_ylim3d([limits[2], limits[3]])
self.ax.set_zlim3d([limits[4], limits[5]])
self.segments = self.robot.segments()
# Joint and ee poses
Tb = self.robot.base
# loc, joints, ee = self.axes_calcs()
# Plot robot name
if self.showname:
self.name = self.ax.text(
Tb.t[0], Tb.t[1], 0.05, self.robot.name)
# Initialize the robot links
self.links = []
self.sh_links = []
for i in range(len(self.segments)):
# Plot the shadow of the robot links, draw first so robot is always
# in front
if self.shadow:
shadow, = self.ax.plot(
0, 0,
zorder=1,
**self.options['shadow'])
self.sh_links.append(shadow)
line, = self.ax.plot(
0, 0, 0, **self.options['robot'])
self.links.append(line)
self.eeframes = []
self.joints = []
def _plot_quiver(self, p0, p1, options):
qv = self.ax.quiver(
p0[0], p0[1], p0[2],
p1[0] - p0[0],
p1[1] - p0[1],
p1[2] - p0[2],
**options
)
return qv
def _plot_quiver2(self, p0, dir, j):
vec = dir * self.options['jointaxislength']
start = p0 - vec / 2
qv = self.ax.quiver(
start[0], start[1], start[2],
vec[0], vec[1], vec[2],
zorder=5,
**self.options['jointaxes']
)
if self.jointlabels:
pl = p0 - vec * 0.6
label = self.ax.text(pl[0], pl[1], pl[2], f'$q_{j}$', **self.options['jointlabels'] )
return [qv, label]
else:
return [qv]
|
__version__ = "0.0.3"
EXTENSION_NAME = "flask-jwt-persistency"
class JWTPersistency(object):
"""Wrapper class that integrates JWT Persistency Flask application.
To use it, instantiate with an application:
from flask import Flask
from flask_jwt_extended import JWTManager
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
jwt = JWTManager(app)
db = SQLAlchemy(app)
jwtp = JWTPersistency(app, jwt, db)
:param app: The Flask application object.
:param jwt: A JWTManager object from flask_jwt_extended lib.
:param db: A SQLAlchemy object from flask_sqlalchemy lib.
For a full working example please refer to examples folder.
"""
def __init__(self, app=None, jwt=None, db=None):
if app is not None and jwt is not None and db is not None:
self.init_app(app, jwt, db)
def init_app(self, app, jwt, db):
"""Initializes the application with the extension.
:param Flask app: The Flask application object.
"""
if not app.config["SQLALCHEMY_BINDS"]:
app.config["SQLALCHEMY_BINDS"] = {}
app.config["SQLALCHEMY_BINDS"]["jwtptokens"] = dict.get(
app.config, "JWTP_DATABASE_URL", "sqlite:///jwtptokens.db")
app.extensions = getattr(app, "extensions", {})
app.extensions[EXTENSION_NAME] = self
class Token(db.Model):
__bind_key__ = 'jwtptokens'
id = db.Column(db.Integer, primary_key=True)
jti = db.Column(db.String(120), index=True, unique=True)
identity = db.Column(db.String(120), index=True)
revoked = db.Column(db.Boolean)
@staticmethod
def is_jti_blacklisted(jti):
token = Token.query.filter_by(jti=jti).first()
if token is None or token.revoked is True:
return True
return False
@staticmethod
def set_jti_revoked_state(jti, state):
token = Token.query.filter_by(jti=jti).first()
if token:
token.revoked = state
db.create_all(bind='jwtptokens')
self.db = db
self.Token = Token
@jwt.token_in_blocklist_loader
def check_if_token_in_blocklist(jwt_header, jwt_payload):
jti = jwt_payload['jti']
return Token.is_jti_blacklisted(jti)
def new_token(self, jti, identity):
"""
Persist the token generated for a certain '<identity>' in the database.
Usage:
access_token = create_access_token(identity="username")
jti = get_jti(encoded_token=access_token)
jwtp.new_token(jti=jti, identity="username")
"""
new_token = self.Token(jti=jti, identity=identity, revoked=False)
self.db.session.add(new_token)
self.db.session.commit()
def revoke_token(self, jti):
"""
Revoke the token identified by an unique identifier '<jti>' if exists in the database.
Usage:
jti = get_jwt()['jti']
jwtp.revoke_token(jti)
"""
self.Token.set_jti_revoked_state(jti, True)
self.db.session.commit()
def revoke_all_tokens(self, identity):
"""
Revoke all tokens generated for a certain '<identity>' in the database.
Usage:
username = get_jwt_identity()
jwtp.revoke_all_tokens(username)
"""
tokens = self.Token.query.filter_by(identity=identity).all()
for token in tokens:
token.revoked = True
self.db.session.commit()
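# Illustrative usage sketch (not part of the extension); the /login and /logout
# routes are hypothetical, only the jwtp calls mirror the docstrings above:
#
#   from flask_jwt_extended import (create_access_token, get_jti, get_jwt,
#                                   jwt_required)
#
#   @app.route("/login", methods=["POST"])
#   def login():
#       token = create_access_token(identity="username")
#       jwtp.new_token(jti=get_jti(encoded_token=token), identity="username")
#       return {"access_token": token}
#
#   @app.route("/logout", methods=["POST"])
#   @jwt_required()
#   def logout():
#       jwtp.revoke_token(get_jwt()["jti"])
#       return {"msg": "token revoked"}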
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Common helper module for working with Chrome's processes and windows."""
import logging
import os
import psutil
import re
import win32gui
import win32process
def get_process_name(p):
"""A wrapper to return a psutil.Process name."""
# Process.name was a property prior to version 2.0.
if psutil.version_info[0] < 2:
return p.name
# But it's a function since 2.0.
return p.name()
def get_process_exe(p):
"""A wrapper to return a psutil.Process exe."""
# Process.exe was a property prior to version 2.0.
if psutil.version_info[0] < 2:
return p.exe
# But it's a function since 2.0.
return p.exe()
def get_process_ppid(p):
"""A wrapper to return a psutil.Process ppid."""
# Process.ppid was a property prior to version 2.0.
if psutil.version_info[0] < 2:
return p.ppid
# But it's a function since 2.0.
return p.ppid()
def GetProcessIDAndPathPairs():
"""Returns a list of 2-tuples of (process id, process path).
"""
process_id_and_path_pairs = []
for process in psutil.process_iter():
try:
process_id_and_path_pairs.append((process.pid, get_process_exe(process)))
except psutil.Error:
# It's normal that some processes are not accessible.
pass
return process_id_and_path_pairs
def GetProcessIDs(process_path):
"""Returns a list of IDs of processes whose path is |process_path|.
Args:
process_path: The path to the process.
Returns:
A list of process IDs.
"""
return [pid for (pid, path) in GetProcessIDAndPathPairs() if
path == process_path]
def WaitForChromeExit(chrome_path):
"""Waits for all |chrome_path| processes to exit.
Args:
chrome_path: The path to the chrome.exe on which to wait.
"""
def GetChromeProcesses(chrome_path):
"""Returns a dict of all |chrome_path| processes indexed by pid."""
chrome_processes = dict()
for process in psutil.process_iter():
try:
if get_process_exe(process) == chrome_path:
chrome_processes[process.pid] = process
logging.info('Found chrome process %s' % get_process_exe(process))
elif get_process_name(process) == os.path.basename(chrome_path):
raise Exception(
'Found other chrome process %s' % get_process_exe(process))
except psutil.Error:
pass
return chrome_processes
def GetBrowserProcess(chrome_processes):
"""Returns a psutil.Process for the browser process in |chrome_processes|.
"""
# Find the one whose parent isn't a chrome.exe process.
for process in chrome_processes.itervalues():
try:
if get_process_ppid(process) not in chrome_processes:
return process
except psutil.Error:
pass
return None
chrome_processes = GetChromeProcesses(chrome_path)
while chrome_processes:
# Prefer waiting on the browser process.
process = GetBrowserProcess(chrome_processes)
if not process:
# Pick any process to wait on if no top-level parent was found.
process = next(chrome_processes.itervalues())
if process.is_running():
logging.info(
'Waiting on %s for %s %s processes to exit' %
(str(process), len(chrome_processes), get_process_exe(process)))
process.wait()
# Check for stragglers and keep waiting until all are gone.
chrome_processes = GetChromeProcesses(chrome_path)
def GetWindowHandles(process_ids):
"""Returns a list of handles of windows owned by processes in |process_ids|.
Args:
process_ids: A list of process IDs.
Returns:
A list of handles of windows owned by processes in |process_ids|.
"""
hwnds = []
def EnumerateWindowCallback(hwnd, _):
_, found_process_id = win32process.GetWindowThreadProcessId(hwnd)
if found_process_id in process_ids and win32gui.IsWindowVisible(hwnd):
hwnds.append(hwnd)
# Enumerate all the top-level windows and call the callback with the hwnd as
# the first parameter.
win32gui.EnumWindows(EnumerateWindowCallback, None)
return hwnds
def WindowExists(process_ids, class_pattern):
"""Returns whether there exists a window with the specified criteria.
This method returns whether there exists a window that is owned by a process
in |process_ids| and has a class name that matches |class_pattern|.
Args:
process_ids: A list of process IDs.
class_pattern: The regular expression pattern of the window class name.
Returns:
A boolean indicating whether such window exists.
"""
for hwnd in GetWindowHandles(process_ids):
if re.match(class_pattern, win32gui.GetClassName(hwnd)):
return True
return False
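# Illustrative call (added as an example; the window class pattern is an
# assumption about Chrome's top-level window class name):
#   WindowExists(GetProcessIDs(chrome_path), 'Chrome_WidgetWin_.*')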
|
#!/usr/bin/env python
"""
A module to manipulate files on EOS or on the local file system. Intended to have the same interface as castortools.py.
"""
from __future__ import print_function
import sys
import os
import re
import shutil
import io
import zlib
import subprocess
def splitPFN(pfn):
"""Split the PFN in to { <protocol>, <host>, <path>, <opaque> }"""
groups = re.match("^(\w+)://([^/]+)/(/[^?]+)(\?.*)?", pfn)
if not groups: raise RuntimeError("Malformed pfn: '%s'" % pfn)
return (groups.group(1), groups.group(2), groups.group(3), groups.group(4))
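# Example (illustrative, derived from the regular expression above):
#   splitPFN('root://eoscms.cern.ch//eos/cms/store/file.root?svcClass=default')
#   == ('root', 'eoscms.cern.ch', '/eos/cms/store/file.root', '?svcClass=default')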
def _runCommand(cmd):
myCommand = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE )
( out, err ) = myCommand.communicate()
if myCommand.returncode != 0:
print("Command (%s) failed with return code: %d" % ( cmd, myCommand.returncode ), file=sys.stderr)
print(err, file=sys.stderr)
return out,err,myCommand.returncode
def runXRDCommand(path, cmd, *args):
"""Run an xrd command.
    !!! Will, what happens in case of a problem?
??? At some point, should return a list of lines instead of a string."""
#print( "lfn:", lfn, cmd )
tokens = splitPFN(path)
command = ['xrd', tokens[1], cmd, tokens[2]]
command.extend(args)
# print( ' '.join(command) )
return _runCommand(command)
def runEOSCommand(path, cmd, *args):
"""Run an eos command.
!!! Will, when the EOS command fails, it passes silently...
I think we should really try and raise an exception in case of problems.
should be possible as the return code is provided in the tuple returned by runner."""
tokens = splitPFN(path)
#obviously, this is not nice
command = ['eos', cmd]
command.extend(args)
command.append(tokens[2])
return _runCommand(command)
def isLFN( path ):
"""Tests whether this path is a CMS LFN (name starts with /store...)"""
# return re.match('^/store.*', path ) is not None
return path.startswith('/store')
def isEOS( path ):
"""Tests whether this path is a CMS EOS (name starts with /eos...)"""
return path.startswith('/eos') or path.startswith('root://eoscms.cern.ch//eos/cms')
def eosToLFN( path ):
"""Converts a EOS PFN to an LFN.
Just strip out /eos/cms from path.
If this string is not found, return path.
??? Shouldn't we raise an exception instead?"""
return path.replace('root://eoscms.cern.ch/', '').replace('/eos/cms','')
#also define an alias for backwards compatibility
castorToLFN = eosToLFN
def lfnToPFN( path, tfcProt = 'rfio'):
"""Converts an LFN to a PFN. For example:
/store/cmst3/user/cbern/CMG/TauPlusX/Run2011A-03Oct2011-v1/AOD/V2/PAT_CMG_V2_4_0/H2TAUTAU_Nov21
->
root://eoscms//eos/cms/store/cmst3/user/cbern/CMG/TauPlusX/Run2011A-03Oct2011-v1/AOD/V2/PAT_CMG_V2_4_0/H2TAUTAU_Nov21?svcClass=cmst3&stageHost=castorcms
This function only checks path, and does not access the storage system.
If the path is in /store/cmst3, it assumes that the CMST3 svcClass is to be used.
Otherwise, is uses the default one.
??? what is tfcprot? """
if path.startswith("/store/"):
path = path.replace("/store/","root://eoscms.cern.ch//eos/cms/store/")
elif path.startswith("/pnfs/psi.ch/cms/trivcat/"):
path = path.replace("/pnfs/psi.ch/cms/trivcat/","root://t3se01.psi.ch//")
if ":" in path: pfn = path
else: pfn = "file:"+path
return pfn
def lfnToEOS( path ):
"""Converts LFN to EOS.
If path is not an LFN in the first place, return path.
??? shouldn't we raise an exception?"""
if isLFN(path):
pfn = 'root://eoscms.cern.ch//eos/cms/' + path
return pfn.replace('//store','/store')
else:
return path
#also define an alias for backwards compatibility
lfnToCastor = lfnToEOS
def isEOSDir( path ):
"""Returns True if path is either:
/store/...
or
/eos/cms/store/...
or
root://eoscms.cern.ch//eos/cms/
Otherwise, returns False.
"""
return path.startswith('/eos') or path.startswith('/store') or path.startswith('root://eoscms.cern.ch//eos/cms/') or path.startswith('root://eoscms//eos/cms/')
#also define an alias for backwards compatibility
isCastorDir = isEOSDir
def isEOSFile( path ):
"""Returns True if path is a file or directory stored on EOS (checks for path existence)"""
if not isEOSDir(path): return False
_, _, ret = runEOSCommand( path, 'ls')
return ret == 0
#also define an alias for backwards compatibility
isCastorFile = isEOSFile
def fileExists( path ):
"""Returns true if path is a file or directory stored locally, or on EOS.
This function checks for the file or directory existence."""
eos = isEOSDir(path)
result = False
if eos:
# print('eos: ' + path)
result = isEOSFile(path)
else:
# print('not eos: ' + path)
#check locally
result = os.path.exists(path)
# print(result)
return result
def eosDirSize(path):
'''Returns the size of a directory on EOS in GB.'''
lfn = eosToLFN(path)
res = runEOSCommand(lfn, 'find', '--size')
output = res[0].split('\n')
size = 0
for file in output:
try:
size += float(file.split('=')[2])
except IndexError:
pass
return size/1024/1024/1024
def fileChecksum(path):
'''Returns the checksum of a file (local or on EOS).'''
checksum='ERROR'
if not fileExists(path): raise RuntimeError('File does not exist.')
if isEOS(path):
lfn = eosToLFN(path)
res = runEOSCommand(lfn, 'find', '--checksum')
output = res[0].split('\n')[0]
checksum = output.split('=')[2]
else:
f = io.open(path,'r+b')
checksum = 1
buf = ''
while True:
buf = f.read(1024*1024*10) # 10 MB buffer
if len(buf)==0: break # EOF reached
checksum = zlib.adler32(buf,checksum)
checksum = str(hex(checksum & 0xffffffff))[2:]
return checksum.rjust(8,'0')
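# Note (added for clarity): for local files the value returned above is the
# adler32 of the file contents rendered as an 8-character, zero-padded hex
# string, presumably to match the checksum format reported by EOS through
# 'eos find --checksum' in the other branch.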
def createEOSDir( path ):
"""Makes a directory in EOS
???Will, I'm quite worried by the fact that if this path already exists, and is
a file, everything will 'work'. But then we have a file, and not a directory,
while we expect a dir..."""
pfn = lfnToPFN(path)
if not isEOSFile(pfn):
# if not isDirectory(lfn):
runEOSCommand(pfn,'mkdir','-p')
if isDirectory(path):
return path
else:
raise OSError('cannot create directory '+ path)
#also define an alias for backwards compatibility
createCastorDir = createEOSDir
def mkdir(path):
"""Create a directory, either on EOS or locally"""
# print('mkdir '+ path)
if isEOS( path ) or isLFN(path):
createEOSDir(path)
else:
# recursive directory creation (like mkdir -p)
os.makedirs(path)
return path
def isDirectory(path):
"""Returns True if path is a directory on EOS.
Tests for file existence.
This function returns False for EOS files, and crashes with local paths
???Will, this function also seems to work for paths like:
/eos/cms/...
??? I think that it should work also for local files, see isFile."""
out, _, _ = runXRDCommand(path,'existdir')
return 'The directory exists' in out
def isFile(path):
"""Returns True if a path is a file.
Tests for file existence.
Returns False for directories.
Works on EOS and local paths.
???This function works with local files, so not the same as isDirectory...
isFile and isDirectory should behave the same.
"""
if not path.startswith('/eos') and not path.startswith('/store'):
if( os.path.isfile(path) ):
return True
else:
return False
else:
out, _, _ = runXRDCommand(path,'existfile')
return 'The file exists' in out
def chmod(path, mode):
"""Does chmod on a file or directory"""
#
return runEOSCommand(path, 'chmod', '-r', str(mode))
def listFiles(path, rec = False, full_info = False):
"""Provides a list of the specified directory
"""
# -- listing on the local filesystem --
if os.path.isdir( path ):
if not rec:
# not recursive
return [ '/'.join([path,file]) for file in os.listdir( path )]
else:
# recursive, directories are put in the list first,
# followed by the list of all files in the directory tree
result = []
allFiles = []
for root,dirs,files in os.walk(path):
result.extend( [ '/'.join([root,dir]) for dir in dirs] )
allFiles.extend( [ '/'.join([root,file]) for file in files] )
result.extend(allFiles)
return result
# -- listing on EOS --
if not isEOSDir(path):
raise RuntimeError("Bad path '%s': not existent, and not in EOS" % path)
cmd = 'dirlist'
if rec:
cmd = 'dirlistrec'
files, _, _ = runXRDCommand(path, cmd)
result = []
for line in files.split('\n'):
tokens = [t for t in line.split() if t]
if tokens:
#convert to an LFN
# result.append(tuple(tokens))
#COLIN need same interface for eos and local fs
if full_info:
result.append( tokens)
else:
result.append( tokens[4] )
return result
def ls(path, rec = False):
"""Provides a simple list of the specified directory, works on EOS and locally"""
return [eosToLFN(t) for t in listFiles(path, rec)]
def ls_EOS(path, rec = False):
"""Provides a simple list of the specified directory, works on EOS only, but is faster than the xrd version"""
if rec:
stdout, _, ret = runEOSCommand(path,'find','-f')
return [eosToLFN(line) for line in stdout.split('\n') if line]
else:
stdout, _, ret = runEOSCommand(path,'ls')
lfn = eosToLFN(path)
return [os.path.join(lfn,line) for line in stdout.split('\n') if line]
def rm(path, rec=False):
"""rm, works on EOS and locally.
Colin: should implement a -f mode and a confirmation when deleting dirs recursively."""
# print('rm '+ path)
path = lfnToEOS(path)
if isEOS(path):
if rec:
runEOSCommand(path, 'rm', '-r')
else:
runEOSCommand(path,'rm')
elif os.path.exists(path):
if not rec:
os.remove( path )
else:
shutil.rmtree(path)
else:
raise ValueError(path + ' is not EOS and not local... should not happen!')
def remove( files, rec = False):
"""Remove a list of files and directories, possibly recursively
Colin: Is that obsolete? why not use rm?"""
for path in files:
lfn = eosToLFN(path)
if not rec:
rm(path)
else:
#this should be used with care
file_list = ls(path, rec = True)
file_list.append(lfn)
#order the files in depth order - i.e. remove the deepest files first
files_rec = sorted([(len([ff for ff in f.split('/') if ff]), f) for f in file_list if f and f.startswith(lfn)], reverse = True)
for f in files_rec:
rm(f[1])
def cat(path):
"""cat, works on EOS and locally"""
path = lfnToEOS(path)
if isEOS(path):
#print("the file to cat is:"+ path)
out, err, _ = runXRDCommand(path,'cat')
lines = []
if out:
pattern = re.compile('cat returned [0-9]+')
for line in out.split('\n'):
match = pattern.search(line)
if line and match is not None:
lines.append(line.replace(match.group(0),''))
break
else:
lines.append(line)
if err:
print(out, file=sys.stderr)
print(err, file=sys.stderr)
allLines = '\n'.join(lines)
if allLines and not allLines.endswith('\n'):
allLines += '\n'
return allLines
else:
        content = open(path).read()
if content and not content.endswith('\n'):
content += '\n'
return content
def xrdcp(src, dest):
"""Does a copy of files using xrd.
Colin: implement a generic cp interface as done for rm, ls, etc?"""
recursive = False
#first the src file
pfn_src = src
if os.path.exists(src):
#local
pfn_src = src
if os.path.isdir(src):
recursive = True
elif fileExists(src):
src = eosToLFN(src)
pfn_src = lfnToPFN(src)
if isDirectory(src):
recursive = True
else:
raise ValueError(src + ' does not exist.')
#now the dest
pfn_dest = dest
if isEOSDir(dest):
dest = eosToLFN(dest)
pfn_dest = lfnToPFN(dest)
if isDirectory(dest):
tokens = splitPFN(pfn_dest)
pfn_dest = '%s://%s//%s/' % (tokens[0],tokens[1],tokens[2])
elif os.path.exists(dest):
pfn_dest = dest
command = ['xrdcp', '--force']
if recursive:
# print('recursive')
topDir = src.rstrip('/').split('/')[-1]
if topDir != '.':
dest = '/'.join([dest, topDir])
# printr( 'mkdir ' + dest )
mkdir( dest )
files = listFiles(src, rec=True)
# pprint.pprint( [file[4] for file in files] )
for srcFile in files:
# srcFile = file[4]
pfnSrcFile = srcFile
if isEOSDir(srcFile):
srcFile = eosToLFN(srcFile)
pfnSrcFile = lfnToPFN(srcFile)
destFile = srcFile.replace( src, '' )
destFile = '/'.join([dest,destFile])
pfnDestFile = destFile
if isEOSDir(destFile):
lfnDestFile = eosToLFN(destFile)
pfnDestFile = lfnToPFN(lfnDestFile)
# print('srcFile '+pfnSrcFile)
# print('destFile '+pfnDestFile)
if isFile(srcFile):
_xrdcpSingleFile( pfnSrcFile, pfnDestFile )
else:
mkdir(destFile)
else:
_xrdcpSingleFile( pfn_src, pfn_dest )
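# Illustrative call (paths are hypothetical): copy an EOS directory tree to a
# local destination, letting xrdcp() recurse over the EOS listing:
#   xrdcp('/store/user/someone/somedir', '/tmp/localdir')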
def _xrdcpSingleFile( pfn_src, pfn_dest):
"""Copies a single file using xrd."""
command = ['xrdcp', '--force']
command.append(pfn_src)
command.append(pfn_dest)
# print(' '.join(command))
run = True
if run:
out, err, ret = _runCommand(command)
if err:
print(out, file=sys.stderr)
print(err, file=sys.stderr)
return ret
def move(src, dest):
"""Move filename1 to filename2 locally to the same server"""
src = eosToLFN(src)
dest = eosToLFN(dest)
runXRDCommand(src,'mv', lfnToEOS(dest))
def matchingFiles( path, regexp):
"""Return a list of files matching a regexp"""
# print(path + ' '+ regexp)
pattern = re.compile( regexp )
#files = ls_EOS(path)
files = ls(path)
# print(files)
return [f for f in files if pattern.match(os.path.basename(f)) is not None]
def datasetNotEmpty( path, regexp ):
pattern = re.compile( regexp )
files = ls_EOS(path)
for f in files:
if pattern.match( os.path.basename(f) ) is not None:
return 1
return 0
def cmsStage( absDestDir, files, force):
"""Runs cmsStage with LFNs if possible"""
destIsEOSDir = isEOSDir(absDestDir)
if destIsEOSDir:
createEOSDir( absDestDir )
for fname in files:
command = ['cmsStage']
if force:
command.append('-f')
command.append(eosToLFN(fname))
command.append(eosToLFN(absDestDir))
print(' '.join(command))
_runCommand(command)
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Find/Replace widget"""
# pylint: disable=C0103
# pylint: disable=R0903
# pylint: disable=R0911
# pylint: disable=R0201
# Standard library imports
import re
# Third party imports
from qtpy.QtCore import Qt, QTimer, Signal, Slot
from qtpy.QtGui import QTextCursor
from qtpy.QtWidgets import (QCheckBox, QGridLayout, QHBoxLayout, QLabel,
QSizePolicy, QWidget)
# Local imports
from spyder.config.base import _
from spyder.config.gui import config_shortcut, fixed_shortcut
from spyder.py3compat import to_text_string
from spyder.utils import icon_manager as ima
from spyder.utils.qthelpers import create_toolbutton, get_icon
from spyder.widgets.comboboxes import PatternComboBox
def is_position_sup(pos1, pos2):
"""Return True is pos1 > pos2"""
return pos1 > pos2
def is_position_inf(pos1, pos2):
"""Return True is pos1 < pos2"""
return pos1 < pos2
class FindReplace(QWidget):
"""Find widget"""
STYLE = {False: "background-color:rgb(255, 175, 90);",
True: "",
None: ""}
visibility_changed = Signal(bool)
def __init__(self, parent, enable_replace=False):
QWidget.__init__(self, parent)
self.enable_replace = enable_replace
self.editor = None
self.is_code_editor = None
glayout = QGridLayout()
glayout.setContentsMargins(0, 0, 0, 0)
self.setLayout(glayout)
self.close_button = create_toolbutton(self, triggered=self.hide,
icon=ima.icon('DialogCloseButton'))
glayout.addWidget(self.close_button, 0, 0)
# Find layout
self.search_text = PatternComboBox(self, tip=_("Search string"),
adjust_to_minimum=False)
self.search_text.valid.connect(
lambda state:
self.find(changed=False, forward=True, rehighlight=False))
self.search_text.lineEdit().textEdited.connect(
self.text_has_been_edited)
self.previous_button = create_toolbutton(self,
triggered=self.find_previous,
icon=ima.icon('ArrowUp'))
self.next_button = create_toolbutton(self,
triggered=self.find_next,
icon=ima.icon('ArrowDown'))
self.next_button.clicked.connect(self.update_search_combo)
self.previous_button.clicked.connect(self.update_search_combo)
self.re_button = create_toolbutton(self, icon=ima.icon('advanced'),
tip=_("Regular expression"))
self.re_button.setCheckable(True)
self.re_button.toggled.connect(lambda state: self.find())
self.case_button = create_toolbutton(self,
icon=get_icon("upper_lower.png"),
tip=_("Case Sensitive"))
self.case_button.setCheckable(True)
self.case_button.toggled.connect(lambda state: self.find())
self.words_button = create_toolbutton(self,
icon=get_icon("whole_words.png"),
tip=_("Whole words"))
self.words_button.setCheckable(True)
self.words_button.toggled.connect(lambda state: self.find())
self.highlight_button = create_toolbutton(self,
icon=get_icon("highlight.png"),
tip=_("Highlight matches"))
self.highlight_button.setCheckable(True)
self.highlight_button.toggled.connect(self.toggle_highlighting)
hlayout = QHBoxLayout()
self.widgets = [self.close_button, self.search_text,
self.previous_button, self.next_button,
self.re_button, self.case_button, self.words_button,
self.highlight_button]
for widget in self.widgets[1:]:
hlayout.addWidget(widget)
glayout.addLayout(hlayout, 0, 1)
# Replace layout
replace_with = QLabel(_("Replace with:"))
self.replace_text = PatternComboBox(self, adjust_to_minimum=False,
tip=_('Replace string'))
self.replace_button = create_toolbutton(self,
text=_('Replace/find'),
icon=ima.icon('DialogApplyButton'),
triggered=self.replace_find,
text_beside_icon=True)
self.replace_button.clicked.connect(self.update_replace_combo)
self.replace_button.clicked.connect(self.update_search_combo)
self.all_check = QCheckBox(_("Replace all"))
self.replace_layout = QHBoxLayout()
widgets = [replace_with, self.replace_text, self.replace_button,
self.all_check]
for widget in widgets:
self.replace_layout.addWidget(widget)
glayout.addLayout(self.replace_layout, 1, 1)
self.widgets.extend(widgets)
self.replace_widgets = widgets
self.hide_replace()
self.search_text.setTabOrder(self.search_text, self.replace_text)
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
self.shortcuts = self.create_shortcuts(parent)
self.highlight_timer = QTimer(self)
self.highlight_timer.setSingleShot(True)
self.highlight_timer.setInterval(1000)
self.highlight_timer.timeout.connect(self.highlight_matches)
def create_shortcuts(self, parent):
"""Create shortcuts for this widget"""
# Configurable
findnext = config_shortcut(self.find_next, context='_',
name='Find next', parent=parent)
findprev = config_shortcut(self.find_previous, context='_',
name='Find previous', parent=parent)
togglefind = config_shortcut(self.show, context='_',
name='Find text', parent=parent)
togglereplace = config_shortcut(self.toggle_replace_widgets,
context='_', name='Replace text',
parent=parent)
# Fixed
fixed_shortcut("Escape", self, self.hide)
return [findnext, findprev, togglefind, togglereplace]
def get_shortcut_data(self):
"""
Returns shortcut data, a list of tuples (shortcut, text, default)
shortcut (QShortcut or QAction instance)
text (string): action/shortcut description
default (string): default key sequence
"""
return [sc.data for sc in self.shortcuts]
def update_search_combo(self):
self.search_text.lineEdit().returnPressed.emit()
def update_replace_combo(self):
self.replace_text.lineEdit().returnPressed.emit()
def toggle_replace_widgets(self):
if self.enable_replace:
# Toggle replace widgets
if self.replace_widgets[0].isVisible():
self.hide_replace()
self.hide()
else:
self.show_replace()
self.replace_text.setFocus()
@Slot(bool)
def toggle_highlighting(self, state):
"""Toggle the 'highlight all results' feature"""
if self.editor is not None:
if state:
self.highlight_matches()
else:
self.clear_matches()
def show(self):
"""Overrides Qt Method"""
QWidget.show(self)
self.visibility_changed.emit(True)
if self.editor is not None:
text = self.editor.get_selected_text()
# If no text is highlighted for search, use whatever word is under
# the cursor
if not text:
try:
cursor = self.editor.textCursor()
cursor.select(QTextCursor.WordUnderCursor)
text = to_text_string(cursor.selectedText())
except AttributeError:
# We can't do this for all widgets, e.g. WebView's
pass
# Now that text value is sorted out, use it for the search
if text:
self.search_text.setEditText(text)
self.search_text.lineEdit().selectAll()
self.refresh()
else:
self.search_text.lineEdit().selectAll()
self.search_text.setFocus()
@Slot()
def hide(self):
"""Overrides Qt Method"""
for widget in self.replace_widgets:
widget.hide()
QWidget.hide(self)
self.visibility_changed.emit(False)
if self.editor is not None:
self.editor.setFocus()
self.clear_matches()
def show_replace(self):
"""Show replace widgets"""
self.show()
for widget in self.replace_widgets:
widget.show()
def hide_replace(self):
"""Hide replace widgets"""
for widget in self.replace_widgets:
widget.hide()
def refresh(self):
"""Refresh widget"""
if self.isHidden():
if self.editor is not None:
self.clear_matches()
return
state = self.editor is not None
for widget in self.widgets:
widget.setEnabled(state)
if state:
self.find()
def set_editor(self, editor, refresh=True):
"""
Set associated editor/web page:
codeeditor.base.TextEditBaseWidget
browser.WebView
"""
self.editor = editor
# Note: This is necessary to test widgets/editor.py
# in Qt builds that don't have web widgets
try:
from qtpy.QtWebEngineWidgets import QWebEngineView
except ImportError:
QWebEngineView = type(None)
self.words_button.setVisible(not isinstance(editor, QWebEngineView))
self.re_button.setVisible(not isinstance(editor, QWebEngineView))
from spyder.widgets.sourcecode.codeeditor import CodeEditor
self.is_code_editor = isinstance(editor, CodeEditor)
self.highlight_button.setVisible(self.is_code_editor)
if refresh:
self.refresh()
if self.isHidden() and editor is not None:
self.clear_matches()
@Slot()
def find_next(self):
"""Find next occurrence"""
state = self.find(changed=False, forward=True, rehighlight=False)
self.editor.setFocus()
self.search_text.add_current_text()
return state
@Slot()
def find_previous(self):
"""Find previous occurrence"""
state = self.find(changed=False, forward=False, rehighlight=False)
self.editor.setFocus()
return state
def text_has_been_edited(self, text):
"""Find text has been edited (this slot won't be triggered when
setting the search pattern combo box text programmatically"""
self.find(changed=True, forward=True, start_highlight_timer=True)
def highlight_matches(self):
"""Highlight found results"""
if self.is_code_editor and self.highlight_button.isChecked():
text = self.search_text.currentText()
words = self.words_button.isChecked()
regexp = self.re_button.isChecked()
self.editor.highlight_found_results(text, words=words,
regexp=regexp)
def clear_matches(self):
"""Clear all highlighted matches"""
if self.is_code_editor:
self.editor.clear_found_results()
def find(self, changed=True, forward=True,
rehighlight=True, start_highlight_timer=False):
"""Call the find function"""
text = self.search_text.currentText()
if len(text) == 0:
self.search_text.lineEdit().setStyleSheet("")
if not self.is_code_editor:
# Clears the selection for WebEngine
self.editor.find_text('')
return None
else:
case = self.case_button.isChecked()
words = self.words_button.isChecked()
regexp = self.re_button.isChecked()
found = self.editor.find_text(text, changed, forward, case=case,
words=words, regexp=regexp)
self.search_text.lineEdit().setStyleSheet(self.STYLE[found])
if self.is_code_editor and found:
if rehighlight or not self.editor.found_results:
self.highlight_timer.stop()
if start_highlight_timer:
self.highlight_timer.start()
else:
self.highlight_matches()
else:
self.clear_matches()
return found
@Slot()
def replace_find(self):
"""Replace and find"""
if (self.editor is not None):
replace_text = to_text_string(self.replace_text.currentText())
search_text = to_text_string(self.search_text.currentText())
pattern = search_text if self.re_button.isChecked() else None
case = self.case_button.isChecked()
first = True
cursor = None
while True:
if first:
# First found
seltxt = to_text_string(self.editor.get_selected_text())
cmptxt1 = search_text if case else search_text.lower()
cmptxt2 = seltxt if case else seltxt.lower()
if self.editor.has_selected_text() and cmptxt1 == cmptxt2:
# Text was already found, do nothing
pass
else:
if not self.find(changed=False, forward=True,
rehighlight=False):
break
first = False
wrapped = False
position = self.editor.get_position('cursor')
position0 = position
cursor = self.editor.textCursor()
cursor.beginEditBlock()
else:
position1 = self.editor.get_position('cursor')
if is_position_inf(position1,
position0 + len(replace_text) -
len(search_text) + 1):
# Identify wrapping even when the replace string
# includes part of the search string
wrapped = True
if wrapped:
if position1 == position or \
is_position_sup(position1, position):
# Avoid infinite loop: replace string includes
# part of the search string
break
if position1 == position0:
# Avoid infinite loop: single found occurrence
break
position0 = position1
if pattern is None:
cursor.removeSelectedText()
cursor.insertText(replace_text)
else:
seltxt = to_text_string(cursor.selectedText())
cursor.removeSelectedText()
cursor.insertText(re.sub(pattern, replace_text, seltxt))
if self.find_next():
found_cursor = self.editor.textCursor()
cursor.setPosition(found_cursor.selectionStart(),
QTextCursor.MoveAnchor)
cursor.setPosition(found_cursor.selectionEnd(),
QTextCursor.KeepAnchor)
else:
break
if not self.all_check.isChecked():
break
self.all_check.setCheckState(Qt.Unchecked)
if cursor is not None:
cursor.endEditBlock()
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: __init__.py
import dsz
import dsz.cmd
import dsz.data
import dsz.lp
class Throttle(dsz.data.Task):
def __init__(self, cmd=None):
dsz.data.Task.__init__(self, cmd)
def _LoadData(self):
self.ThrottleItem = list()
try:
for x in dsz.cmd.data.Get('ThrottleItem', dsz.TYPE_OBJECT):
self.ThrottleItem.append(Throttle.ThrottleItem(x))
except:
pass
class ThrottleItem(dsz.data.DataBean):
def __init__(self, obj):
try:
self.enabled = dsz.cmd.data.ObjectGet(obj, 'enabled', dsz.TYPE_BOOL)[0]
except:
self.enabled = None
try:
self.bytesPerSecond = dsz.cmd.data.ObjectGet(obj, 'bytesPerSecond', dsz.TYPE_INT)[0]
except:
self.bytesPerSecond = None
try:
self.address = dsz.cmd.data.ObjectGet(obj, 'address', dsz.TYPE_STRING)[0]
except:
self.address = None
return
dsz.data.RegisterCommand('Throttle', Throttle)
THROTTLE = Throttle
throttle = Throttle
|
n=int(input(('Digite um número para ver sua tabuada: ')))
print('A tabuada do {} é: '.format(n))
for c in range (1,11):
print('{} x {} = {} '.format(n,c,n*c))
|
import importlib
Topsis = importlib.import_module("101703088-topsis.topsis").Topsis  # package name is not a valid identifier, so import by literal name
|
# Generated by Django 2.1.11 on 2020-04-08 01:10
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('files', '0014_add_dbGaP_type'),
]
operations = [
migrations.AlterModelOptions(
name='devdownloadtoken',
options={'permissions': [('list_all_version', 'Can list all versions')]},
),
migrations.AlterModelOptions(
name='file',
options={'permissions': [('list_all_file', 'Can list all files'), ('view_my_file', 'Can view all files in studies user is a member of'), ('add_my_study_file', 'Can add files to studies the user is a member of'), ('change_my_study_file', 'Can change files in studies the user is a member of')]},
),
migrations.AlterModelOptions(
name='version',
options={'permissions': [('view_my_version', 'Can view all versions in studies user is a member of'), ('add_my_study_version', 'Can add versions to studies the user is a member of'), ('change_my_study_version', 'Can change versions in studies the user is a member of')]},
),
]
|
"""Preprocess the Artist Workshop environment map
The environment map is available for download from
https://hdrihaven.com/files/hdris/artist_workshop_8k.hdr
"""
from __future__ import print_function
from utils import cmgen
SRC_FILENAME = "artist_workshop_8k.hdr"
DST_DIRECTORY = "../assets/workshop"
DST_LOOKUP_TEX = "../assets/common/brdf_lut"
def main():
cmgen.gen_environment_maps(SRC_FILENAME, DST_DIRECTORY)
cmgen.enlarge_specular_maps(SRC_FILENAME, DST_DIRECTORY)
cmgen.gen_lookup_texture(DST_LOOKUP_TEX)
if __name__ == "__main__":
main()
|
import pymc3 as pm
import numpy as np
import theano
def solve_single_gp(lengthscales, X0, samples, noise=1.0e-3):
cov = pm.gp.cov.ExpQuad(len(lengthscales), ls=lengthscales)
K = cov(X0.T)
K_noise = K + pm.gp.cov.WhiteNoise(noise)(X0.T)
L = np.linalg.cholesky(K_noise.eval())
alpha = np.linalg.solve(
L.T, np.linalg.solve(L, samples.flatten()))
return cov, alpha
def gp_predict(cov, alpha, X0, X1):
K_s = cov(X0.T, X1.T)
post_mean = np.dot(K_s.T.eval(), alpha)
return post_mean
def gp_predictt(cov, alpha, X0, X1):
K_s = cov(X0.T, X1.T)
post_mean = theano.tensor.dot(K_s.T, alpha)
return post_mean
def gp_learn_emline_space(sparsegrids, grid_preds, ls_mult=1.5, noise=1.0e-4):
'''
use a GP model to learn the emission-line prediction space for one observable
'''
XX0 = np.meshgrid(*[g for g in sparsegrids], indexing='ij')
X0 = np.row_stack([a.flatten() for a in XX0])
ls = ls_mult * np.array(list(map(
lambda a: np.mean(np.diff(a)), sparsegrids)))
covs, alphas = zip(*[solve_single_gp(ls, X0, pred.flatten(), noise=noise)
for pred in grid_preds])
return X0, covs, alphas
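# --- Usage sketch (illustrative toy values, not from the original project) ---
# The helpers above compose as follows: gp_learn_emline_space() fits one exact GP per
# observable on a sparse parameter grid, and gp_predict() evaluates the posterior mean
# at new points. An assumed workflow might look like:
# >>> grids = [np.linspace(0., 1., 5), np.linspace(0., 1., 5)]   # sparse parameter axes
# >>> preds = [np.random.rand(5, 5)]                             # one observable sampled on the grid
# >>> X0, covs, alphas = gp_learn_emline_space(grids, preds)
# >>> X1 = np.array([[0.5], [0.5]])                              # query points, shape (ndim, npoints)
# >>> gp_predict(covs[0], alphas[0], X0, X1)                     # posterior mean at X1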
|
"""
Standard process. Uniquely associated to a CIM_ComputerSystem and a parent CIM_Process.
"""
import os
import sys
import psutil
import rdflib
from rdflib.namespace import RDF
import logging
import lib_uris
import lib_common
import lib_util
from lib_properties import pc
import lib_properties
from lib_psutil import *
def GetEnvVarMap(the_pid):
"""Returns the dict of environment variables of a given process."""
# TODO: Apparently, it exists in psutil.Process().environ() ??
if lib_util.isPlatformLinux:
filproc = open("/proc/%d/environ" % the_pid)
map_envs = {}
envlin = filproc.readlines()
for li in envlin[0].split("\0"):
pos_equ = li.find("=")
map_envs[li[:pos_equ]] = li[pos_equ+1:]
filproc.close()
return map_envs
# https://www.codeproject.com/kb/threads/readprocenv.aspx
if lib_util.isPlatformWindows:
# TODO: Not implemented yet.
return {}
return {}
def GetEnvVarProcess(the_env_var, the_pid):
try:
return GetEnvVarMap(the_pid)[the_env_var]
except KeyError:
return None
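# Usage sketch (Linux only; values are illustrative): reading this process's own
# environment through /proc, e.g.
# >>> GetEnvVarProcess("PATH", os.getpid())     # the PATH string, or None if unset
# >>> GetEnvVarMap(os.getpid()).get("HOME")     # same data via the dict of variables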
################################################################################
def EntityOntology():
return (["Handle"],)
def EntityName(entity_ids_arr):
entity_id = entity_ids_arr[0]
# If the process is not there, this is not a problem.
try:
proc_obj = psutil.Process(int(entity_id))
return PsutilProcToName(proc_obj)
except psutil.NoSuchProcess:
# This might be, on Windows, a parent process which has exited.
return "Non-existent process:" + entity_id
except ValueError:
return "Invalid pid:(" + entity_id + ")"
def _add_command_line_and_executable(grph, node, proc_obj):
"""This aadds to the node of a process, its command line and the name of the executable."""
cmd_line = PsutilProcToCmdline(proc_obj)
node_cmd_line = rdflib.Literal(cmd_line)
grph.add((node, pc.property_command, node_cmd_line))
exec_name, exec_err_msg = PsutilProcToExe(proc_obj)
if exec_name == "":
grph.add((node, pc.property_runs, rdflib.Literal("Executable error:" + exec_err_msg)))
else:
exec_node = lib_uris.gUriGen.FileUri(exec_name)
grph.add((node, pc.property_runs, exec_node))
return node_cmd_line
def _command_line_argument_to_node(process_cwd, file_path):
"""This receives a string which is a command line argument and may be a path.
If proven so, it returns the node, otherwise None. """
if process_cwd is None:
return None
# TODO: If it starts with "-" or "--", or "/" or Windows, then it might be an option
if lib_util.isPlatformWindows and file_path.startswith("\\??\\"):
# Corner case on Windows: file_path=r"\??\C:\windows\system32\conhost.exe"
file_path = file_path[4:]
full_path = os.path.join(process_cwd, file_path)
# TODO: Should simply try if the path is valid.
logging.debug("_command_line_argument_to_node full_path=%s" % full_path)
if os.path.exists(full_path):
if os.path.isdir(full_path):
logging.debug("_command_line_argument_to_node DIR:%s" % full_path)
return lib_uris.gUriGen.DirectoryUri(full_path)
elif os.path.isfile(full_path):
logging.debug("_command_line_argument_to_node FILE:%s" % full_path)
return lib_uris.gUriGen.FileUri(full_path)
else:
logging.warning("_command_line_argument_to_node INVALID:%s" % full_path)
return None
def add_command_line_arguments(grph, node, proc_obj):
"""
The input is a psutil process object. This adds to the input node triples describing
all parameters of the command line of the process.
"""
# TODO: The command line could be a class because it can be found:
# TODO: - In a process.
# TODO: - In a makefile or a Visual Studio vcxproj file, and an ANT file.
# TODO: A command is an unique concept which can be shared by several processes.
# TODO: If a node is replaced by a BNode (blank node), then it could be a pattern
# TODO: to describe a set of command line where only some terms change.
# TODO: The string should probably be encoded in B64 and prefixed for example by "B64".
# TODO: If the input string also starts with "B64", it must be encoded no matter what.
# TODO: It should happen very rarely, so should not be annoying,
# TODO: HOW TO SORT ARGUMENTS ? Several solutions:
# TODO: Property "argv?key=1"
# TODO: This must be consistent with sorting filenames in SVG tables.
# TODO: The key must be stripped when processing collapsed properties.
# TODO: Rename "collapsed properties" to "tabular properties" and use the same concept for reports,
# TODO: Because this is a similar problem of sorting successive values with a key,
# TODO: the difference being that all values come at once, instead of successive values indexed by time.
node_cmd_line = _add_command_line_and_executable(grph, node, proc_obj)
proc_cwd, proc_msg = PsutilProcCwd(proc_obj)
# This tells that argv values are displayed in tabular form, in a HTML table, instead of distinct nodes.
lib_properties.add_property_metadata_to_graph(grph, pc.property_argv, pc.meta_property_collapsed)
cmd_array = PsutilProcToCmdlineArray(proc_obj)
for argv_index, argv_value in enumerate(cmd_array):
if argv_index == 0:
# No need to display twice the command.
continue
argv_property = pc.property_argv # lib_properties.MakeProp("argv", key=argv_index)
argv_node = _command_line_argument_to_node(proc_cwd, argv_value)
if not argv_node:
# Default literal value if it was not possible to create a node for the value.
# argv_node = rdflib.Literal(argv_value)
# RDFS = ClosedNamespace(
# uri=URIRef("http://www.w3.org/2000/01/rdf-schema#"),
# terms=[
# "Resource", "Class", "subClassOf", "subPropertyOf", "comment", "label",
# "domain", "range", "seeAlso", "isDefinedBy", "Literal", "Container",
# "ContainerMembershipProperty", "member", "Datatype"]
# )
# https://www.w3.org/TR/rdf-schema/#ch_bag
# The rdf:Seq class is the class of RDF 'Sequence' containers.
# It is a subclass of rdfs:Container.
# Whilst formally it is no different from an rdf:Bag or an rdf:Alt,
# the rdf:Seq class is used conventionally to indicate to a human reader
# that the numerical ordering of the container membership properties of the container is intended to be significant.
# rdf:value is an instance of rdf:Property that may be used in describing structured values.
# pc.property_information is the key for sorting nodes of a given property and object.
# This could be a parameter of a collapsed property.
# TODO: This might also be ...
# TODO: ... an IP address.
# TODO: ... an IP address followed by a port number.
argv_node = rdflib.Literal(argv_value)
argv_keyed_node = rdflib.BNode()
grph.add((argv_keyed_node, RDF.value, argv_node))
grph.add((argv_keyed_node, pc.property_information, rdflib.Literal(argv_index)))
grph.add((node_cmd_line, argv_property, argv_keyed_node))
def AddInfo(grph, node, entity_ids_arr):
pid_proc = entity_ids_arr[0]
exec_node = None
grph.add((node, pc.property_pid, rdflib.Literal(pid_proc)))
try:
proc_obj = psutil.Process(int(pid_proc))
_add_command_line_and_executable(grph, node, proc_obj)
# A node is created with the returned string which might as well be
# an error message, which must be unique. Otherwise all faulty nodes
# would be merged.
# TODO: Problem, this node is still clickable. We should return a node
# of this same type, but with a faulty state, which would make it unclickable.
user_name = PsutilProcToUser(proc_obj, "User access denied:PID=%s" % pid_proc)
# TODO: Should add the hostname to the user ???
user_name_host = lib_common.format_username(user_name)
user_node = lib_uris.gUriGen.UserUri(user_name_host)
grph.add((node, pc.property_user, user_node))
sz_resid_set_sz = PsutilResidentSetSize(proc_obj)
grph.add((node, lib_common.MakeProp("Resident Set Size"), rdflib.Literal(sz_resid_set_sz)))
sz_virst_mem_sz = PsutilVirtualMemorySize(proc_obj)
grph.add((node, lib_common.MakeProp("Virtual Memory Size"), rdflib.Literal(sz_virst_mem_sz)))
except Exception as exc:
logging.error("CIM_Process.AddInfo. Caught:%s", exc)
grph.add((node, pc.property_information, rdflib.Literal(str(exc))))
# Needed for other operations.
return exec_node
def Usable(entity_type, entity_ids_arr):
"""This should apply to all scripts in the subdirectories: If the process does not exist,
they should not be displayed by entity.py. The process must be running."""
pid_proc = entity_ids_arr[0]
return psutil.pid_exists(int(pid_proc))
def SelectFromWhere(where_key_values):
"""This must return at least the properties defined in the ontology.
There is no constraints on the other properties, so the query can return any set of key-value pairs,
if the minimal set of properties is there."""
# TODO: Add "select_attributes"
logging.debug("CIM_Process SelectFromWhere where_key_values=%s", str(where_key_values))
for proc_obj in psutil.process_iter():
user_name = PsutilProcToUser(proc_obj, None)
if user_name:
user_name_host = lib_common.format_username(user_name)
else:
user_name_host = user_name
parent_pid = proc_obj.ppid()
if "Handle" in where_key_values and str(where_key_values["Handle"]) != str(proc_obj.pid):
continue
if "user" in where_key_values and where_key_values["user"] != user_name_host:
continue
if "parent_pid" in where_key_values and str(where_key_values["parent_pid"]) != str(parent_pid):
continue
# TODO: Should reuse the existing properties.
ret_value = {
lib_properties.MakeProp("Handle"): rdflib.Literal(proc_obj.pid),
lib_properties.MakeProp("username"): rdflib.Literal(user_name_host),
lib_properties.MakeProp("parent_pid"): rdflib.Literal(parent_pid)}
yield ret_value
|
"""
Aircraft layout business logic
"""
import sqlalchemy as db
from sqlalchemy.orm import joinedload
from sqlalchemy.exc import IntegrityError, NoResultFound
from .seat_allocations import allocate_available_seats, copy_seat_allocations, get_current_seat_allocations, \
remove_seats
from ..model import Session, AircraftLayout, Flight, Seat
def _retrieve_and_validate_new_layout(flight_id, aircraft_layout_id):
"""
Retrieve an aircraft layout and confirm that it's suitable to be applied to a given flight
:param flight_id: ID for the flight to validate the layout for
:param aircraft_layout_id: ID for the aircraft layout
:return: Instance of the AircraftLayout with the specified ID
"""
with Session.begin() as session:
flight = session.query(Flight).get(flight_id)
aircraft_layout = session.query(AircraftLayout).get(aircraft_layout_id)
if flight.airline_id != aircraft_layout.airline_id:
raise ValueError("Aircraft layout is not associated with the airline for the flight")
if flight.aircraft_layout_id == aircraft_layout_id:
raise ValueError("New aircraft layout is the same as the current aircraft layout")
if aircraft_layout.capacity < flight.passenger_count:
raise ValueError("Aircraft layout doesn't have enough seats to accommodate all passengers")
return aircraft_layout
def _create_seats_from_layout(flight_id, aircraft_layout):
"""
Apply an aircraft layout to the specified flight
:param flight_id: ID for the flight to apply the layout to
:param aircraft_layout: AircraftLayout instance to apply
"""
with Session.begin() as session:
flight = session.query(Flight).get(flight_id)
# Iterate over the row definitions and the seat letters in each, adding a seat in association with the flight
for row_definition in aircraft_layout.row_definitions:
# Iterate over the seats in the row, adding each to the flight
for seat_letter in row_definition.seats:
seat = Seat(flight=flight, seat_number=f"{row_definition.number}{seat_letter}")
session.add(seat)
# Make the association between flight and layout
flight.aircraft_layout = aircraft_layout
def apply_aircraft_layout(flight_id, aircraft_layout_id):
"""
Apply an aircraft layout to a flight, copying across seat allocations
:param flight_id: ID of the flight to apply the layout to
:param aircraft_layout_id: ID of the aircraft layout to apply
"""
# TODO : This needs refactoring but works well enough as a demo for now
# Get the aircraft layout and make sure it's valid for the specified flight
aircraft_layout = _retrieve_and_validate_new_layout(flight_id, aircraft_layout_id)
# Get the current seating allocations and remove the existing seats
current_allocations = get_current_seat_allocations(flight_id)
remove_seats(flight_id)
# Create the new seats
_create_seats_from_layout(flight_id, aircraft_layout)
# Copy seating allocations across
not_allocated = copy_seat_allocations(flight_id, current_allocations)
# It's possible some seats don't exist in the new layout compared to the old. If there are any passengers
# who were in those seats, move them to the next available seats
if not_allocated:
allocate_available_seats(flight_id, not_allocated)
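# Usage sketch (IDs are hypothetical): swapping flight 42 onto layout 7 re-creates the
# seat map and carries existing allocations across, e.g.
# apply_aircraft_layout(flight_id=42, aircraft_layout_id=7)
# A ValueError is raised if the layout belongs to another airline, is unchanged, or is
# too small for the passengers already booked.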
def list_layouts(airline_id=None):
"""
List of aircraft layouts for an airline
:param airline_id: ID of the airline for which to load aircraft layouts (or None to list all layouts)
:return: A list of Aircraft layout instances with eager loading of related entities
"""
with Session.begin() as session:
if airline_id:
layouts = session.query(AircraftLayout) \
.options(joinedload(AircraftLayout.airline)) \
.filter(AircraftLayout.airline_id == airline_id) \
.order_by(db.asc(AircraftLayout.aircraft),
db.asc(AircraftLayout.name)) \
.all()
else:
layouts = session.query(AircraftLayout) \
.options(joinedload(AircraftLayout.airline)) \
.order_by(db.asc(AircraftLayout.aircraft),
db.asc(AircraftLayout.name)) \
.all()
return layouts
def get_layout(layout_id):
"""
Get the aircraft layout with the specified ID
:param layout_id: ID of the aircraft layout to return
:return: AircraftLayout instance for the specified layout record
:raises ValueError: If the layout doesn't exist
"""
with Session.begin() as session:
layout = session.query(AircraftLayout) \
.options(joinedload(AircraftLayout.airline)) \
.get(layout_id)
if layout is None:
raise ValueError("Aircraft layout not found")
return layout
def create_layout(airline_id, aircraft_model, layout_name):
"""
Create a new aircraft layout with the specified properties
:param airline_id: ID for the airline associated with the layout
:param aircraft_model: Aircraft model e.g. A321
:param layout_name: Layout name e.g. Neo
"""
with Session.begin() as session:
aircraft_layout = AircraftLayout(airline_id=airline_id,
aircraft=aircraft_model,
name="" if layout_name is None else layout_name)
session.add(aircraft_layout)
return aircraft_layout
def update_layout(layout_id, aircraft_model, layout_name):
"""
Update the core details for an aircraft layout
:param layout_id: ID for the aircraft layout to update
:param aircraft_model: Aircraft model e.g. A321
:param layout_name: Layout name e.g. Neo
:raises ValueError: If the edit would result in a duplicate layout or the layout doesn't exist
"""
try:
with Session.begin() as session:
aircraft_layout = session.query(AircraftLayout)\
.filter(AircraftLayout.id == layout_id)\
.one()
aircraft_layout.aircraft = aircraft_model
aircraft_layout.name = layout_name
except NoResultFound as e:
raise ValueError("Aircraft layout not found") from e
except IntegrityError as e:
raise ValueError("Cannot update aircraft layout as this would create a duplicate") from e
def delete_layout(layout_id):
"""
Delete the aircraft layout with the specified ID
:param layout_id: ID of the aircraft layout to delete
:raises ValueError: If the layout is still referenced
"""
try:
with Session.begin() as session:
layout = session.query(AircraftLayout).get(layout_id)
session.delete(layout)
except IntegrityError as e:
raise ValueError("Cannot delete an aircraft layout that is referenced by a flight") from e
|
from tqdm import tqdm_notebook
import pandas as pd
import numpy as np
def _get_path_to_src(end_node, parents_dict):
output = []
src = end_node
while src != -1:
output.append(src)
src = parents_dict[src]
return list(reversed(output))
def find_cycles(edges, molecule_name, max_depth):
node = 0
stack = []
cycle_array = []
_find_cycles(node, stack, edges, molecule_name, max_depth, cycle_array)
cycle_sets = []
output = []
for cycle in cycle_array:
if set(cycle) in cycle_sets:
continue
cycle_sets.append(set(cycle))
output.append(cycle)
return output
def _find_cycles(node, stack, edges, molecule_name, max_depth, cycle_array):
"""
DFS over the bond graph collects cycles (up to max_depth); each cycle found is appended to cycle_array.
"""
# cycle
if node in stack:
idx = stack.index(node)
cycle_array.append(stack[idx:].copy())
return
if len(stack) >= max_depth:
return
stack.append(node)
key = (molecule_name, node)
if key not in edges:
# print('Absent key', key)
stack.pop()
return
for child in edges[key]:
# we don't want to use the same edge in reverse fashion
if len(stack) > 1 and child == stack[-2]:
continue
_find_cycles(child, stack, edges, molecule_name, max_depth, cycle_array)
stack.pop()
return
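# Worked example (toy data, not from the real dataset): for a triangular ring, the edge
# map keyed by (molecule_name, atom_index) would be
# >>> edges = {('m', 0): [1, 2], ('m', 1): [0, 2], ('m', 2): [0, 1]}
# >>> find_cycles(edges, 'm', max_depth=10)
# [[0, 1, 2]]
# The reverse traversal [0, 2, 1] is dropped because its set of atoms was already seen.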
def bfs_for_neighbors(nodes, parents_dict, edges, molecule_name, kneighbor_dict, depth, max_depth):
"""
BFS outwards from the given nodes; kneighbor_dict[depth] stores the atom indices first reached at that depth (from 1 up to max_depth).
"""
# We store neighbors
if depth > 0:
kneighbor_dict[depth] = nodes
if depth >= max_depth:
return
next_layer = []
for parent_node in nodes:
key = (molecule_name, parent_node)
if key not in edges:
# print('Absent key', key)
continue
for child in edges[key]:
if child in parents_dict or child in next_layer:
continue
next_layer.append(child)
parents_dict[child] = parent_node
return bfs_for_neighbors(next_layer, parents_dict, edges, molecule_name, kneighbor_dict, depth + 1, max_depth)
def bfs(nodes, parents_dict, target, edges, molecule_name, depth, max_depth):
"""
BFS from the given nodes towards target; returns the ordered list of atom indices from the start atom to target (reconstructed via parents_dict), or [] if target is not reached within max_depth.
"""
if depth >= max_depth:
return []
next_layer = []
for parent_node in nodes:
if parent_node == target:
return _get_path_to_src(target, parents_dict)
key = (molecule_name, parent_node)
if key not in edges:
# print('Absent key', key)
continue
for child in edges[key]:
if child in parents_dict or child in next_layer:
continue
next_layer.append(child)
parents_dict[child] = parent_node
return bfs(next_layer, parents_dict, target, edges, molecule_name, depth + 1, max_depth)
def get_neighbor_atoms(edges_df, X_df, max_path_len=5):
"""
Returns neighbor atom indices for each atom_index present in X_df.
"""
# assuming symmetric edges and atom information.
# id to column
X_df = X_df.reset_index()
edges = edges_df.groupby(['molecule_name', 'atom_index_0'])['atom_index_1'].apply(list).to_dict()
data1 = X_df[['id', 'molecule_name', 'atom_index_0',
'atom_index_1']].groupby(['molecule_name', 'atom_index_0']).first().reset_index()
data1.rename({'atom_index_0': 'atom_index'}, axis=1, inplace=True)
data1.drop('atom_index_1', axis=1, inplace=True)
data2 = X_df[['id', 'molecule_name', 'atom_index_0',
'atom_index_1']].groupby(['molecule_name', 'atom_index_1']).first().reset_index()
data2.rename({'atom_index_1': 'atom_index'}, axis=1, inplace=True)
data2.drop('atom_index_0', axis=1, inplace=True)
data = pd.concat([data1, data2])
data = data.groupby(['molecule_name', 'atom_index']).first().reset_index()
data = data[['id', 'molecule_name', 'atom_index']].values
kneighbor_output = []
for row in tqdm_notebook(data):
idx = row[0]
mn = row[1]
s = row[2]
parents_dict = {s: -1}
kneighbor_dict = {}
bfs_for_neighbors([s], parents_dict, edges, mn, kneighbor_dict, 0, max_path_len)
nbr_depth = []
nbr_ai = []
for dep, kneighbors in kneighbor_dict.items():
nbr_depth += [dep] * len(kneighbors)
nbr_ai += kneighbors
# id,atom_index,neighbor_bond_distance,atom_index
kneighbor_output += [np.vstack([[idx] * len(nbr_ai), [s] * len(nbr_ai), nbr_depth, nbr_ai]).T]
kneighbor_df = pd.DataFrame(
np.concatenate(kneighbor_output),
columns=['id', 'atom_index', 'nbr_distance', 'nbr_atom_index'],
dtype=np.int32)
kneighbor_df[['atom_index', 'nbr_distance',
'nbr_atom_index']] = kneighbor_df[['atom_index', 'nbr_distance', 'nbr_atom_index']].astype(np.uint8)
return kneighbor_df
def get_cycle_data(edges_df, structures_df):
"""
Returns cycles present in the structure. each row corresponds to one cycle.
"""
edges = edges_df.groupby(['molecule_name', 'atom_index_0'])['atom_index_1'].apply(list).to_dict()
molecule_names = structures_df.molecule_name.unique()
max_depth = 50
# note that 9 is the maximum number of non H atoms in the problem
max_cycle_len = 10
output = []
for mn in molecule_names:
cycles = find_cycles(edges, mn, max_depth)
for cycle in cycles:
assert len(cycle) <= max_cycle_len
row = [mn] + cycle + [-1] * (max_cycle_len - len(cycle))
output.append(row)
cols = ['molecule_name'] + list(map(str, list(range(10))))
df = pd.DataFrame(output, columns=cols)
df[cols[1:]] = df[cols[1:]].astype(np.int16)
return df.set_index('molecule_name')
def get_intermediate_atoms_link(edges_df, X_df, max_path_len=10):
"""
Returns, for each row of X_df, the atoms that lie (in order) on the path from atom_index_0 to atom_index_1 in the molecule structure.
"""
# assuming symmetric edges and atom information.
edges = edges_df.groupby(['molecule_name', 'atom_index_0'])['atom_index_1'].apply(list).to_dict()
data = X_df[['molecule_name', 'atom_index_0', 'atom_index_1']].values
output = [[]] * len(data)
# max path_len -2 atoms between s and e
for idx, row in tqdm_notebook(enumerate(data)):
mn = row[0]
s = row[1]
e = row[2]
parents_dict = {s: -1}
path = bfs([s], parents_dict, e, edges, mn, 0, max_path_len)
output[idx] = path + [-1] * (max_path_len - len(path))
return pd.DataFrame(output, index=X_df.index, dtype=np.int16)
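# Illustrative example (hypothetical linear molecule 0-1-2-3): for a row with
# atom_index_0=0 and atom_index_1=3, bfs() returns [0, 1, 2, 3] and the output row is
# padded with -1 up to max_path_len, i.e. [0, 1, 2, 3, -1, -1, -1, -1, -1, -1].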
|
""" Utilities for sending email messages through SMTP.
:author: Sana Development Team
:version: 2.0
"""
import urllib
import logging
from django.conf import settings
from django.core.mail import send_mail
__all__ = ["send_review_notification",]
#TODO These should be saved as settings or a global config table
link = "https://0.0.0.0/mds/admin/core/%s/%d/"
mobile_link = "https://0.0.0.0/mds/core/mobile/%s/%s/"
def send_review_notification(instance,addresses,subject,
replyTo=settings.SMTP_REPLYTO,
template=settings.REVIEW_POST_TEMPLATE,
auth_user=settings.EMAIL_HOST_USER,
auth_password=settings.EMAIL_HOST_PASSWORD):
""" Formats and sends and email message which include a url for reviewing
an uploaded encounter.
"""
try:
url = link % (instance.__class__.__name__.lower(), instance.id)
mobile_url = mobile_link % (instance.__class__.__name__.lower(), instance.uuid)
logging.debug("review link %s" % url)
#urlencodred = urllib.urlencode(url)
#print urlencoded)
message = template % (url,mobile_url)
logging.debug("Review message:\n %s" % message)
print(message)
send_mail(subject, message, settings.SMTP_REPLYTO,addresses,
fail_silently=False,
auth_user=auth_user,
auth_password=auth_password)
for address in addresses:
print(address)
logging.info("Review notification sent successfully to %s" % address)
result = True
except Exception:
logging.error("Review notification send failed!")
result = False
return result
|
# Generated by Django 2.1.7 on 2020-03-12 21:52
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0006_user_approved'),
]
operations = [
migrations.AlterModelOptions(
name='comment',
options={'ordering': ['-updated']},
),
migrations.AlterModelOptions(
name='post',
options={'ordering': ['-updated']},
),
]
|
"""
--------------------------------------------------------------------------------------------------------------------
This program creates 2D light effects onto a pygame surface/image (32 bit PNG file encoded with
alpha channels transparency).
The files radial4.png, RadialTrapezoid, RadialWarning are controlling the shape and light intensity
of the illuminated area (radial masks).
The class can be easily implemented into a 2D game (top down or horizontal/vertical scrolling) to enhance
the atmosphere and lighting environment.
This code comes with a MIT license.
Copyright (c) 2018 Yoann Berenguer
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
Please acknowledge and give reference if using the source code for your project
--------------------------------------------------------------------------------------------------------------------
Version 2 changes :
- Added volumetric effect (animated smoke or plasma) in the illuminated area to set a specific ambiance.
This effect can also be used for generating force field around a set point.
- Added warning light (rotational lighting)
- Implemented shadow projection effects from a light source's coordinates (see file Shadows.py for more details, and
credit to Marcus Møller for his shadow algorithms (https://github.com/marcusmoller)).
- Code cleanup and split the code into different modules
Constant.py
LoadTextureFile.py
Shadow.py
LightDemo.py
- ERRATA
05/06/2018 Bug fix
elif x > SIZE[1] - lx:
w_high = SIZE[1] - x
by ++>
elif x > SIZE[0] - lx:
w_high = SIZE[0] - x
In update(self), the statements below were wrongly placed.
self.dt = 0
self.counter += 1
Have a nice journey
"""
__author__ = "Yoann Berenguer"
__copyright__ = "Copyright 2007."
__credits__ = ["Yoann Berenguer"]
__license__ = "MIT License"
__version__ = "2.0.0"
__maintainer__ = "Yoann Berenguer"
__email__ = "yoyoberenguer@hotmail.com"
__status__ = "Demo"
import numpy
from numpy import putmask, array, arange, repeat, newaxis
import random
import threading
from Constants import *
from Shadows import Shadow
import time
import multiprocessing
class CreateLight(object):
""" Define a light source's properties and methods."""
UPDATE = False
def __init__(self, light_name_, light_shape_, light_shade_, alpha_mask_, light_flickering_, light_variance_,
light_rotating_, light_volume_, start_color_gradient_, end_color_gradient_,
light_intensity_, position_, volume_, mouse_=False):
assert isinstance(light_name_, str), 'Expecting str for ' \
'argument light_name_ got %s ' % type(light_name_)
assert isinstance(light_shape_, tuple), 'Expecting tuple for ' \
'argument light_shape_ got %s ' % type(light_shape_)
assert isinstance(light_shade_, pygame.Color), 'Expecting pygame.Color for ' \
'argument light_shade_ got %s ' % type(light_shade_)
assert isinstance(alpha_mask_, (numpy.ndarray, list)), 'Expecting numpy.ndarray or list for ' \
'argument alpha_mask_ got %s ' % type(alpha_mask_)
assert isinstance(light_flickering_, bool), 'Expecting bool for ' \
'argument light_flickering_ got %s ' % type(light_flickering_)
assert isinstance(light_variance_, bool), 'Expecting bool for ' \
'argument light_variance_ got %s ' % type(light_variance_)
# Light source properties (see module Constants.py for more details about the light source creation)
self.light_name = light_name_
self.light_shape = light_shape_
self.light_shade = light_shade_
self.alpha_mask = alpha_mask_
self.light_flickering = light_flickering_
self.light_variance = light_variance_
self.light_rotating = light_rotating_
self.light_volume = light_volume_
self.start_color_gradient = start_color_gradient_
self.end_color_gradient = end_color_gradient_
self.light_intensity = light_intensity_
self.position = position_
self.volume = volume_
self._id = id(self)
self.counter = 0
self.dt = 0
self.color_index = 0
self.mouse = mouse_
# time between frames default 0ms
# If animation is lagging, increase self.timing e.g 33ms
self.timing = 15
def gradient(self, index_: int)->list:
""" create a color gradient
:param index_: index pointing to a specific color from a color gradient array (linear color gradient)
The color gradient is built from two distinct colors (start_color_gradient and end_color_gradient).
The colors along the line through those points are calculated using linear interpolation
:return row: return a color from a gradient (color at position index_).
"""
assert isinstance(index_, int), \
'Expecting int for argument index_ got %s ' % type(index_)
diff_ = (array(self.end_color_gradient[:3]) - array(self.start_color_gradient[:3]))
row = arange(256, dtype='float') / 256
row = repeat(row[:, newaxis], [3], 1)
diff_ = repeat(diff_[newaxis, :], [256], 0)
row = numpy.add(array(self.start_color_gradient[:3], numpy.float), array((diff_ * row), dtype=numpy.float),
dtype=numpy.float).astype(dtype=numpy.uint8)
return row[index_]
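# Example of the interpolation above (assumed gradient endpoints): with
# start_color_gradient=(0, 0, 0) and end_color_gradient=(255, 255, 255),
# gradient(0) is ~(0, 0, 0), gradient(128) is ~(127, 127, 127) and gradient(255) is
# ~(254, 254, 254), i.e. a linear blend between the two endpoint colors indexed 0..255.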
def get_light_spot(self):
""" return numpy.arrays and sizes representing the area flood with light. """
# Light source position (x, y)
x = self.position[0]
y = self.position[1]
# radius
lx = self.light_shape[0] >> 1
ly = self.light_shape[1] >> 1
# Squaring the light source
(w_low, w_high) = lx, lx
(h_low, h_high) = ly, ly
# Reshaping if close to the border(s).
if x < lx:
w_low = x
elif x > SIZE[0] - lx:
w_high = SIZE[0] - x
if y < ly:
h_low = y
elif y > SIZE[1] - ly:
h_high = SIZE[1] - y
if isinstance(self.alpha_mask, list):
mask = self.alpha_mask[0]
else:
mask = self.alpha_mask
# Different method but not faster
# rect = pygame.Rect(x-w_low, y-h_low, x + w_high, y + h_high)
# surface = pygame.Surface((300, y + h_high), pygame.SRCALPHA, 32)
# surface.blit(TEXTURE1, (0, 0), rect)
# surface_chunk = pygame.surfarray.array3d(surface)
# return surface_chunk, \
# mask[lx - w_low:lx + w_high, ly - h_low:ly + h_high, :], \
# (w_low + w_high, h_low + h_high)
return RGB1[x - w_low:x + w_high, y - h_low:y + h_high, :], \
mask[lx - w_low:lx + w_high, ly - h_low:ly + h_high, :], \
(w_low + w_high, h_low + h_high)
def spotlight(self, rgb_array: numpy.ndarray, alpha_array: numpy.ndarray, color_index_):
"""
Represent the light source with all its properties. (Variance, flickering aspect, rotating light,
volume)
:param rgb_array: numpy.ndarray representing the area flooded with light
:param alpha_array: numpy.ndarray representing the mask alpha (radial light intensity, check the mask type)
:param color_index_: Index for the color gradient.
"""
"""
assert isinstance(rgb_array, numpy.ndarray), \
'Expecting numpy.ndarray for argument rgb_array got %s ' % type(rgb_array)
assert isinstance(alpha_array, numpy.ndarray), \
'Expecting numpy.ndarray for argument alpha_array got %s ' % type(alpha_array)
assert isinstance(color_index_, int), \
'Expecting int for argument color_index_ got %s ' % type(color_index_)
"""
color = self.light_shade[:3]
# progressive color change from two distinct colors (see Constants.py e.g LIGHT definition.)
if self.light_variance:
color = self.gradient(index_=color_index_)
# self explanatory
elif self.light_flickering:
if random.randint(0, 1000) > 950:
color = [color[0] >> 1, color[1] >> 1, color[2] >> 1]
# Rotate the light with pre-calculated masks alpha.
if self.light_rotating:
if isinstance(self.alpha_mask, list):
alpha_array = self.alpha_mask[self.counter % (len(self.alpha_mask) - 1)]
# Add texture to the light for volumetric aspect.
# The texture is loaded in the main loop and played sequentially (self.counter)
# (self.light_volume and not self.mouse) --> if the mouse goes outside of the main window, the shape of
# alpha_array and rgb_array will not match the array shape of the texture defined by self.volume.
# In short, the volumetric effect will be disabled for dynamic lights using the mouse position.
# todo pixels3d / array3d choose the best format according to surface
if self.logic1:
volume_array = numpy.divide(self.V0[self.counter % len(self.volume)], 25)
args = alpha_array * self.light_intensity * color * volume_array
else:
args = alpha_array * self.light_intensity * color
# light resultant calculation
new_array = numpy.multiply(rgb_array, args) #.astype(numpy.uint16)
# Cap the array
putmask(new_array, new_array > 255, 255)
# putmask(new_array, new_array < 0, 0)
# Build a 3d array (model RGB + A)
new = numpy.dstack((new_array, alpha_array))
# Build the pygame surface (RGBA model)
self.image = pygame.image.frombuffer(new.transpose(1, 0, 2).copy('C').astype(numpy.uint8),
(new.shape[:2][0], new.shape[:2][1]), 'RGBA')
def flickering(self, rgb_array, alpha_array):
assert isinstance(rgb_array, numpy.ndarray), \
'Expecting numpy.ndarray for argument rgb_array got %s ' % type(rgb_array)
assert isinstance(alpha_array, numpy.ndarray), \
'Expecting numpy.ndarray for argument alpha_array got %s ' % type(alpha_array)
color = [self.light_shade[0] >> 1, self.light_shade[1] >> 1, self.light_shade[2] >> 1 ]
new_array = numpy.multiply(rgb_array, alpha_array * self.light_intensity * color,
dtype=numpy.float)
putmask(new_array, new_array > 255, 255)
# putmask(new_array, new_array < 0, 0)
new = numpy.dstack((new_array, alpha_array))
return pygame.image.frombuffer(new.transpose(1, 0, 2).copy('C').astype(numpy.uint8),
(new.shape[:2][0], new.shape[:2][1]), 'RGBA')
def offset_calculation(self):
if self.image.get_size() != self.light_shape:
w, h = self.image.get_size()
self.offset = pygame.math.Vector2(x=self.light_shape[0] - w
if self.position[0] <= SCREENRECT.centerx >> 1 else (self.light_shape[0] - w) * -1,
y=self.light_shape[1] - h if self.position[1] <= SCREENRECT.centery >> 1
else (self.light_shape[1] - h) * -1)
class ShowLight(pygame.sprite.Sprite, CreateLight):
containers = None
images = None
def __init__(self, light_settings):
pygame.sprite.Sprite.__init__(self, self.containers)
CreateLight.__init__(self, *light_settings)
assert isinstance(SCREENRECT, pygame.Rect), \
'\n[-] SCREENRECT must be a pygame.Rect'
print('[+] %s started' % self.light_name)
self.offset = pygame.math.Vector2(0, 0)
self.image = ShowLight.images
self.rect = self.image.get_rect()
self.chunk, self.alpha, surface_size = self.get_light_spot()
# pre assembled logic
self.logic = self.light_variance or self.light_rotating or self.light_volume
self.logic1 = self.light_volume and not self.mouse
self.V0 = []
if not self.mouse:
if self.light_volume:
i = 0
# process the volumetric texture(resizing) to match the light flooded area.
for surface in self.volume:
self.volume[i] = pygame.transform.smoothscale(surface, (surface_size[0], surface_size[1]))
i += 1
# transform the surface into a numpy array
for surface in self.volume:
self.V0.append(pygame.surfarray.pixels3d(surface))
self.spotlight(self.chunk, self.alpha, 0)
self.image_copy = self.image.copy()
if self.light_flickering:
self.image_flickering = self.flickering(self.chunk, self.alpha)
self.offset_calculation()
self.rect = self.image.get_rect(center=self.position + self.offset / 2)
self.factor = 1
def update(self):
if self.dt > self.timing:
# mouse cursor is a dynamic light source
# and thus the area is re-calculated every frame with 'self.spotlight'
if self.mouse:
self.position = MOUSE_POS
self.spotlight(*self.get_light_spot()[:2], self.color_index)
self.offset.x, self.offset.y = (0, 0)
self.offset_calculation()
self.rect = self.image.get_rect(center=self.position + self.offset / 2)
# static light source
else:
# following effects require a constant re-calculation of the light flooded area.
# self.logic = self.light_variance or self.light_rotating or self.light_volume
if self.logic:
self.spotlight(self.chunk, self.alpha, self.color_index)
elif self.light_flickering:
if random.randint(0, 1000) > 950:
self.image = self.image_flickering
else:
self.image = self.image_copy
self.rect = self.image.get_rect(center=self.position + self.offset / 2)
self.color_index += self.factor
if self.color_index > 254 or self.color_index < 1:
self.factor *= -1
self.dt = 0
self.counter += 1
CreateLight.UPDATE = True
self.dt += TIME_PASSED_SECONDS
if __name__ == '__main__':
numpy.set_printoptions(threshold=numpy.nan)
SCREEN.blit(TEXTURE1, (0, 0))
pygame.display.flip()
LIGHT_GROUP = pygame.sprite.Group()
All = pygame.sprite.RenderUpdates()
ShowLight.containers = LIGHT_GROUP, All
# create a dummy surface
ShowLight.images = pygame.Surface((1, 1), 32)
for light in LIGHTS:
if light[0] == 'Spotlight5':
threading.Timer(random.randint(2, 7), ShowLight, args=(light,)).start()
else:
ShowLight(light)
def segment_adjustment(polygon):
segments = ALL_SEGMENTS.copy()
for seg in polygon:
segments.remove(seg)
return segments
# list(map(lambda x: LIGHT1_SEGMENTS.remove(x), list(POLYGON2)))
# Project shadows for specific light sources
shadows = [Shadow(segment_adjustment(POLYGON2), static_=True, location_=(370, 94)), # LIGHT1
Shadow(segment_adjustment(POLYGON1), static_=True, location_=(150, 185)), # LIGHT6
Shadow(ALL_SEGMENTS, static_=True, location_=(333, 595)) # LIGHT5
]
clock = pygame.time.Clock()
global UPDATE
UPDATE = False
while not STOP_GAME:
pygame.event.pump()
while PAUSE:
event = pygame.event.wait()
keys = pygame.key.get_pressed()
if keys[pygame.K_PAUSE]:
PAUSE = False
pygame.event.clear()
keys = None
break
for event in pygame.event.get():
keys = pygame.key.get_pressed()
if event.type == pygame.QUIT or keys[pygame.K_ESCAPE]:
print('Quitting')
STOP_GAME = True
elif event.type == pygame.MOUSEMOTION:
MOUSE_POS = event.pos
elif keys[pygame.K_PAUSE]:
PAUSE = True
print('Paused')
All.update()
if CreateLight.UPDATE:
SCREEN.fill((0, 0, 0, 255))
SCREEN.blit(TEXTURE1, (0, 0))
All.draw(SCREEN)
CreateLight.UPDATE = False
for shadow in shadows:
shadow.update(MOUSE_POS)
shadow.render_frame()
pygame.display.flip()
# print(round(clock.get_fps()))
TIME_PASSED_SECONDS = clock.tick()
FRAME += 1
pygame.quit()
|
# -*- coding:utf-8 -*-
'''
@Author: LiamTTT
@Project: TCTPreprocess.py
@File: slide_reader.py
@Date: 2021/9/10
@Time: 10:22
@Desc: read slides
'''
import os
import openslide
import numpy as np
from loguru import logger
from collections.abc import Iterable
from openslide import OpenSlide
from thirdparty.slide_API import Sdpc, Srp
__all__ = [
"SlideReader",
"mpp_transformer",
"get_attrs"
]
class SlideReader:
handle = None
suffix = None
attrs = None
_path = None
def __init__(self):
pass
@property
def path(self):
return self._path
def open(self, path):
if path == self._path or path is None:
return
else:
self.close()
try:
self.suffix = os.path.splitext(path)[-1]
if self.suffix == ".sdpc":
self.handle = Sdpc()
self.handle.open(path)
elif self.suffix == ".srp":
self.handle = Srp()
self.handle.open(path)
elif self.suffix == ".svs" or self.suffix == ".mrxs":
self.handle = OpenSlide(path)
else:
raise ValueError("File type: {} is not supported.".format(self.suffix))
self._path = path
except Exception as e:
logger.error(f'{e}\nNote: some system requires absolute path for wsi image.')
self.close()
def close(self):
self._path = None
if self.handle is None:
return
self.handle.close()
self.handle = None
def get_attrs(self):
self.attrs = get_attrs(self)
return self.attrs
def get_tile(self, location: tuple, size: tuple, level: int):
""" get tile from slide
:param location: (x, y) at level 0
:param size: (w, h) at level 0
:param level: read in level
:return: RGB img array
"""
if level == 0:
return self.get_tile_for_level0(location, size)
# ensure the attrs had been created
if self.attrs:
pass
else:
self.get_attrs()
# main operations for reading tile
tile_size = mpp_transformer(size, self.attrs["mpp"], self.attrs["mpp"] * (self.attrs["level_ratio"] ** level))
if self.suffix in ['.sdpc', '.srp']:
# parts exceeding the right and bottom edges are filled with (255, 255, 255)
location = mpp_transformer(location,
self.attrs["mpp"], self.attrs["mpp"] * (self.attrs["level_ratio"] ** level))
tile = self.handle.getTile(level, location[0], location[1], tile_size[0], tile_size[1])
elif self.suffix in ['.svs', '.mrxs']:
# parts exceeding the right and bottom edges are filled with (0, 0, 0)
tile = np.array(self.handle.read_region(location, level, tile_size).convert('RGB'))
return tile
def get_tile_for_level0(self, location: tuple, size: tuple):
""" get tile from slide in level 0
:param location: (x, y) at level 0
:param size: (w, h) at level 0
:return: RGB img array
"""
# main operations for reading tile
if self.suffix in ['.sdpc', '.srp']:
# parts exceeding the right and bottom edges are filled with (255, 255, 255)
tile = self.handle.getTile(0, location[0], location[1], size[0], size[1])
elif self.suffix in ['.svs', '.mrxs']:
# parts exceeding the right and bottom edges are filled with (0, 0, 0)
tile = np.array(self.handle.read_region(location, 0, size).convert('RGB'))
return tile
def __del__(self):
self.close()
# == Aux Functions ==
def get_attrs(wsi_handle):
attrs = {}
try:
if wsi_handle.suffix in [".sdpc", ".srp"]:
attrs = wsi_handle.handle.getAttrs()
attrs["bound_init"] = (0, 0)
attrs["level_ratio"] = 2
elif wsi_handle.suffix in ['.svs', '.mrxs']:
attrs = get_openslide_attrs(wsi_handle.handle)
except Exception as e:
logger.error(e)
return attrs
def mpp_transformer(origin, origin_mpp, aim_mpp):
"""transform numbers according to mpp
:param origin: original numbers
:param origin_mpp: original mpp
:param aim_mpp: aim mpp
:return: transformed numbers
"""
if isinstance(origin, Iterable):
transed = []
for num in origin:
transed.append(int(np.around(num * origin_mpp / aim_mpp)))
else:
transed = int(np.around(origin * origin_mpp / aim_mpp))
return transed
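# Example (illustrative values): rescaling a length of 1000 px from 0.25 um/px to
# 0.5 um/px halves it, and iterables are converted element-wise:
# >>> mpp_transformer(1000, 0.25, 0.5)
# 500
# >>> mpp_transformer((1000, 2000), 0.25, 0.5)
# [500, 1000]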
def get_openslide_attrs(handle):
if ".mrxs" in handle._filename:
attrs = {
"mpp": float(handle.properties[openslide.PROPERTY_NAME_MPP_X]),
"level": handle.level_count,
"width": int(handle.properties[openslide.PROPERTY_NAME_BOUNDS_WIDTH]),
"height": int(handle.properties[openslide.PROPERTY_NAME_BOUNDS_HEIGHT]),
"bound_init": (int(handle.properties[openslide.PROPERTY_NAME_BOUNDS_X]),
int(handle.properties[openslide.PROPERTY_NAME_BOUNDS_Y])),
"level_ratio": int(handle.level_downsamples[1])
}
elif ".svs" in handle._filename:
try:
attrs = {
"mpp": float(handle.properties[openslide.PROPERTY_NAME_MPP_X]),
"level": handle.level_count,
"width": int(handle.dimensions[0]),
"height": int(handle.dimensions[1]),
"bound_init": (0, 0),
"level_ratio": int(handle.level_downsamples[1])
}
except KeyError:
attrs = {
"mpp": float(handle.properties['aperio.MPP'].strip(';')),
"level": handle.level_count,
"width": int(handle.dimensions[0]),
"height": int(handle.dimensions[1]),
"bound_init": (0, 0),
"level_ratio": int(handle.level_downsamples[1])
}
return attrs
if __name__ == '__main__':
dir = 'path to slide (.mrxs .svs .sdpc .srp)'
handle = SlideReader()
handle.open(dir)
img = handle.get_tile((8000, 8000), (500, 500), 1)
print(handle.attrs)
handle.close()
|
"""
See http://pbpython.com/pandas-google-forms-part1.html for more details
and explanation of how to create the SECRETS_FILE
Purpose of this example is to pull google sheet data into
a pandas DataFrame.
"""
from __future__ import print_function
import gspread
from oauth2client.client import SignedJwtAssertionCredentials
import pandas as pd
import json
SCOPE = ["https://spreadsheets.google.com/feeds"]
SECRETS_FILE = "Pbpython-key.json"
SPREADSHEET = "PBPython User Survey (Responses)"
# Based on docs here - http://gspread.readthedocs.org/en/latest/oauth2.html
# Load in the secret JSON key (must be a service account)
json_key = json.load(open(SECRETS_FILE))
# Authenticate using the signed key
credentials = SignedJwtAssertionCredentials(json_key['client_email'],
json_key['private_key'], SCOPE)
gc = gspread.authorize(credentials)
print("The following sheets are available")
for sheet in gc.openall():
print("{} - {}".format(sheet.title, sheet.id))
# Open up the workbook based on the spreadsheet name
workbook = gc.open(SPREADSHEET)
# Get the first sheet
sheet = workbook.sheet1
# Extract all data into a dataframe
data = pd.DataFrame(sheet.get_all_records())
# Do some minor cleanups on the data
# Rename the columns to make it easier to manipulate
# The data comes in as a dictionary, so we cannot assume the order stays the
# same and must name each column explicitly
column_names = {'Timestamp': 'timestamp',
'What version of python would you like to see used for the examples on the site?': 'version',
'How useful is the content on practical business python?': 'useful',
'What suggestions do you have for future content?': 'suggestions',
'How frequently do you use the following tools? [Python]': 'freq-py',
'How frequently do you use the following tools? [SQL]': 'freq-sql',
'How frequently do you use the following tools? [R]': 'freq-r',
'How frequently do you use the following tools? [Javascript]': 'freq-js',
'How frequently do you use the following tools? [VBA]': 'freq-vba',
'How frequently do you use the following tools? [Ruby]': 'freq-ruby',
'Which OS do you use most frequently?': 'os',
'Which python distribution do you primarily use?': 'distro',
'How would you like to be notified about new articles on this site?': 'notify'
}
data.rename(columns=column_names, inplace=True)
data.timestamp = pd.to_datetime(data.timestamp)
print(data.head())
|
from django.shortcuts import render , redirect
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from . import forms
from . import models
def login_index(request):
if request.user.is_authenticated:
return redirect('home')
else:
# if this is a POST request we need to process the form data
if request.method == 'POST':
# create a form instance and populate it with data from the request:
form = forms.LoginForm(request.POST)
# check whether it's valid:
if form.is_valid():
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
messages.success(request, 'Te has identificado correctamente.')
return redirect('home')
else:
messages.error(request,'Tu usuario no esta activo.')
else:
messages.error(request,'Usuario y/o contraseña incorrectas.')
form = forms.LoginForm()
context = {'form': form}
else:
form = forms.LoginForm()
context = {'form': form}
return render(request, 'index.html', context)
def logout_index(request):
logout(request)
return redirect('login')
def registrarse(request):
if request.user.is_authenticated:
return redirect('home')
else:
form = forms.RegisterForm()
if request.method == 'POST':
form = forms.RegisterForm(request.POST)
if form.is_valid():
form.save()
user = form.cleaned_data.get('username')
messages.success(request,'Te has registrado con exito '+ user)
return redirect('login')
else:
messages.error(request,'Error al registrarte')
context = {'form':form}
return render(request, 'register.html', context)
@login_required(login_url='login')
def template_view(request):
item_list = []
context = {'item_list': item_list}
return render(request, 'home.html', context)
@login_required(login_url='login')
def envios(request):
item_list = models.Envio.objects.all()
form_cambio_estado_msg = ""
form_cambio_estado = forms.CambioEstadoForm()
form_filtrar_envios = forms.FiltroEnviosForm()
if request.method == 'POST':
form_cambio_estado = forms.CambioEstadoForm(request.POST)
if form_cambio_estado.is_valid():
codigoSeguim = form_cambio_estado.cleaned_data.get('codigoSeguim')
estado = form_cambio_estado.cleaned_data.get('estado')
#envio = models.Envio.objects.get(codigoSeguim= codigoSeguim)
try:
envio = models.Envio.objects.get(codigoSeguim= codigoSeguim)
except models.Envio.DoesNotExist:
envio = None
if envio:
envio.estado = estado
envio.save()
form_cambio_estado_msg = "Se cambio el estado correctamente."
form_cambio_estado = forms.CambioEstadoForm()
else:
form_cambio_estado_msg = "Error al cambiar el estado"
else:
form_filtrar_envios = forms.FiltroEnviosForm(request.POST)
if form_filtrar_envios.is_valid():
filtro = form_filtrar_envios.cleaned_data.get('filtro')
if (filtro == 'Codigo de seguim'):
item_list = models.Envio.objects.all().order_by('codigoSeguim')
if (filtro == 'Estado'):
item_list = models.Envio.objects.all().order_by('estado')
if (filtro == 'DNI destinatario'):
item_list = models.Envio.objects.all().order_by('dniPersona')
context = {'item_list': item_list, 'form_filtrar_envios': form_filtrar_envios,'form_cambio_estado': form_cambio_estado, 'form_cambio_estado_msg': form_cambio_estado_msg}
return render(request, 'envios.html', context)
|
from __future__ import annotations
RECIPE_FILE = 'recipes.cptk.yaml'
PROJECT_FILE = '.cptk/project.cptk.yaml'
LAST_FILE = '.cptk/stayaway/last.cptk.txt'
MOVE_FILE = '.cptk/moves.cptk.txt'
MOVE_FILE_SEPERATOR = '::'
MOVE_SAFES = ['./', '**/.cptk/**']
LAST_FILE_SEPERATOR = '::'
INPUT_FILE_SUFFIX = '.in'
OUTPUT_FILE_SUFFIX = '.out'
def TEST_NAME_GENERATOR():
n = 1
while True:
yield f'sample{n:02d}'
n += 1
|
# ---------------------------------------------------------------------------- #
# Stackmetric IO - Copyright ©️ 2022 All Rights Reserved. #
# ---------------------------------------------------------------------------- #
def run():
from nltk.tokenize import word_tokenize, sent_tokenize
import streamlit as st
import streamlit.components.v1 as components
from nltk.corpus import stopwords
from typing import ByteString
from wordcloud import WordCloud
import plotly.express as px
import chart_studio
import matplotlib.pyplot as plt
import scipy as sp
import pandas as pd
import numpy as np
import validators
import bs4 as bs
import urllib.request
import requests
import string
import heapq
import time
import nltk
import re
# ---------------------------------------------------------------------------- #
st.header("SUMMARIZER")
st.write(
"""**LEX** is a text summarization tool which allows the user to copy and paste text and summarize its contents. In this section LEX
can accept copied text or an uploaded file and summarize its contents.
This product is currently in development and it is strictly a *proof of concept* and *not optimized for any real time commercial application or insights*. If you encounter any
inconsistency or error during runtime, please get back to us with the error and the dataset so that it can be reproduced and solved.
Submit the error message to **support@stackmetric.com** for anything more.
This app is not optimized to summarize fewer than 1,000 words and other limitations apply."""
)
model = st.selectbox(
'Please select input method below:',
('---', 'Frequency Based', 'Luhns Algorithm', 'Upload File'))
# option = st.selectbox(
# 'Please select input method below:',
# ('---', 'Input Text', 'Paste a Link','Upload File'))
s_example = "Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots in a piece of classical Latin literature from 45 BC, making it over 2000 years old. Richard McClintock, a Latin professor at Hampden-Sydney College in Virginia, looked up one of the more obscure Latin words, consectetur, from a Lorem Ipsum passage, and going through the cites of the word in classical literature, discovered the undoubtable source. Lorem Ipsum comes from sections 1.10.32 and 1.10.33 of de Finibus Bonorum et Malorum (The Extremes of Good and Evil) by Cicero, written in 45 BC. This book is a treatise on the theory of ethics, very popular during the Renaissance. The first line of Lorem Ipsum, Lorem ipsum dolor sit amet.., comes from a line in section 1.10.32.The standard chunk of Lorem Ipsum used since the 1500s is reproduced below for those interested. Sections 1.10.32 and 1.10.33 from de Finibus Bonorum et Malorum by Cicero are also reproduced in their exact original form, accompanied by English versions from the 1914 translation by H. Rackham."
# -------------------------------- Text Input -------------------------------- #
if model == 'Frequency Based':
option = st.selectbox(
'Please select input method below:',
('---', 'Input Text', 'Paste a Link','Upload File'))
if option == 'Input Text':
text_input = st.text_area("Use the example below or input your own \
text in English (between 1,000 and 10,000 characters)", value=s_example, max_chars=10000, height=330)
if st.button('Summarize'):
if len(text_input) < 1000:
st.error('Please enter a text in English of minimum 1,000 \
characters')
else:
with st.spinner('Processing...'):
time.sleep(2)
st.text('')
# Raw Text
text = re.sub(r'\[[0-9]*\]', ' ', text_input)
text = re.sub(r'\s+', ' ', text)
nltk.download('punkt')
nltk.download('stopwords')
# Clean Text
clean_text = text.lower()
clean_text = re.sub(r'\W', ' ', clean_text)
clean_text = re.sub(r'\d', ' ', clean_text)
clean_text = re.sub(r'\s+', ' ', clean_text)
stopwords = nltk.corpus.stopwords.words('english')
word_frequency = nltk.FreqDist(nltk.word_tokenize
(clean_text))
# Word Dictionary
word2count = {}
for word in nltk.word_tokenize(clean_text):
if word not in stopwords:
if word not in word2count.keys():
word2count[word] = 1
else:
word2count[word] += 1
highest_frequency = max(word2count.values())
# Weighted Words
for word in word2count.keys():
word2count[word] = (word2count[word] / highest_frequency)
# Tokenize sentences
sentences = nltk.sent_tokenize(text)
# Sentence Dictionary
sent2score = {}
for sentence in sentences:
for word in nltk.word_tokenize(sentence.lower()):
if word in word2count.keys():
if len(sentence.split(' ')) < 25:
if sentence not in sent2score.keys():
sent2score[sentence] = word2count[word]
else:
sent2score[sentence] += word2count[word]
best_sentences = heapq.nlargest(10, sent2score, key=sent2score.get)
summary = ' '.join(best_sentences)
st.write(summary)
# Wordcloud
st.write("Word Cloud")
st.set_option('deprecation.showPyplotGlobalUse', False)
wordcloud = WordCloud(background_color = "#f2f8fb", width=800, height=400).generate(summary)
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
st.pyplot()  # render the word cloud in Streamlit; plt.show() has no effect inside a Streamlit app
# Plotly Charts #
# --- Lists
# Convert word2count dict to a list
dict2list = list(word2count.items())
# Sort list in descending order of weight
dict2list = sorted(dict2list, key=lambda x: x[1], reverse=True)
# First 7 words in sorted list of weighted words in descending order
weighted_words_des = dict2list[:7]
# --- Dicts
# Convert sorted list back to dict (this is the complete dict)
sortdict = dict(dict2list)
# Sorted dict
d=dict(weighted_words_des)
# Separate keys and values
keys = d.keys()
val = d.values()
# Convert keys and values into a list
# This will be our x and y axis for our chart
x_axis = list(keys)
y_axis = list(val)
st.write(highest_frequency)
fig = px.bar(x=x_axis, y=y_axis, labels=dict(x="Words", y="Weight", color="Place"))
st.subheader('Weighted Words')
st.plotly_chart(fig)
highF = "The word that appears the most in the article is " + x_axis[0]
numy = highest_frequency
numx = y_axis[0]
numofx = x_axis[0] + " - It appears {numy} times".format(numy=numy, numx=numx)
numF = x_axis[0] + " also has a weight of {numx} which makes is the most important word in this article".format(numx=numx)
st.subheader(highF)
st.subheader(numofx)
st.subheader(numF)
# END Text Input #
# -------------------------------- Link Input -------------------------------- #
if option == 'Paste a Link':
source_txt = st.text_input("")
if st.button('Paste a Link'):
if 'https://' in source_txt:
with st.spinner('Processing...'):
time.sleep(2)
# Retrieve data
URL = source_txt
# Open the URL
page = urllib.request.Request(URL)
result = urllib.request.urlopen(page)
# Store the HTML page in a variable
resulttext = result.read()
# Parsing the data/ creating BeautifulSoup object
soup = bs.BeautifulSoup(resulttext, 'lxml')
# Fetching the data
text = ""
for paragraph in soup.find_all('p'):
text += paragraph.text
# Raw Text
text = re.sub(r'\[[0-9]*\]', ' ', text)
text = re.sub(r'\s+', ' ', text)
# Clean Text
clean_text = text.lower()
clean_text = re.sub(r'\W', ' ', clean_text)
clean_text = re.sub(r'\d', ' ', clean_text)
clean_text = re.sub(r'\s+', ' ', clean_text)
nltk.download('punkt')
nltk.download('stopwords')
stopwords = nltk.corpus.stopwords.words('english')
word_frequency = nltk.FreqDist(nltk.word_tokenize(clean_text))
sentences = nltk.sent_tokenize(text)
# Create a dictionary named word2count mapping words (keys) to counts (values)
word2count = {}
for word in nltk.word_tokenize(clean_text):
if word not in stopwords:
if word not in word2count.keys():
word2count[word] = 1
else:
word2count[word] += 1
highest_frequency = max(word2count.values())
for word in word2count.keys():
word2count[word] = (word2count[word] / highest_frequency)
sent2score = {}
for sentence in sentences:
for word in nltk.word_tokenize(sentence.lower()):
if word in word2count.keys():
if len(sentence.split(' ')) < 25:
if sentence not in sent2score.keys():
sent2score[sentence] = word2count[word]
else:
sent2score[sentence] += word2count[word]
best_sentences = heapq.nlargest(10, sent2score, key=sent2score.get)
summary = ' '.join(best_sentences)
st.write(summary)
# ------------------------------- Plotly Charts ------------------------------ #
# ------------ Lists ------------- #
# Convert word2count dict to a list
dict2list = list(word2count.items())
# Sort list in descending order of weight
dict2list = sorted(dict2list, key=lambda x: x[1], reverse=True)
# First 7 words in sorted list of weighted words in descending order
weighted_words_des = dict2list[:7]
# ------------ Dicts --------------------- #
# Convert sorted list back to dict (this is the complete dict)
sortdict = dict(dict2list)
# Sorted dict
d=dict(weighted_words_des)
# Separate keys and values
keys = d.keys()
val = d.values()
# Convert keys and values into a list
# This will be our x and y axis for our chart
x_axis = list(keys)
y_axis = list(val)
highF = "The word that appears the most in the article is " + x_axis[0]
numy = highest_frequency
numx = y_axis[0]
numofx = "The word " + x_axis[0] + " appears {numy} times".format(numy=numy, numx=numx)
numF = x_axis[0] + " also has a weight of {numx} which makes is the most important word in this article".format(numx=numx)
st.subheader(highF)
st.subheader(numofx)
st.subheader(numF)
fig = px.bar(x=x_axis, y=y_axis, labels=dict(x="Words", y="Weight", color="Place"))
st.subheader('Weighted Words')
st.plotly_chart(fig)
# END Link Input #
else:
st.error('Please enter a valid link')
if __name__ == "__main__":
run()
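# To try this app locally (assuming the file is saved as, e.g., summarizer.py and the
# libraries imported above are installed), launch it with the standard Streamlit CLI:
#
#     streamlit run summarizer.py
#
# The file name is an assumption; any name works.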
|
from rest_framework import generics
from circuits.models import Provider, CircuitType, Circuit
from circuits.filters import CircuitFilter
from . import serializers
class ProviderListView(generics.ListAPIView):
"""
List all providers
"""
queryset = Provider.objects.all()
serializer_class = serializers.ProviderSerializer
class ProviderDetailView(generics.RetrieveAPIView):
"""
Retrieve a single provider
"""
queryset = Provider.objects.all()
serializer_class = serializers.ProviderSerializer
class CircuitTypeListView(generics.ListAPIView):
"""
List all circuit types
"""
queryset = CircuitType.objects.all()
serializer_class = serializers.CircuitTypeSerializer
class CircuitTypeDetailView(generics.RetrieveAPIView):
"""
Retrieve a single circuit type
"""
queryset = CircuitType.objects.all()
serializer_class = serializers.CircuitTypeSerializer
class CircuitListView(generics.ListAPIView):
"""
List circuits (filterable)
"""
queryset = Circuit.objects.select_related('type', 'provider', 'site', 'interface__device')
serializer_class = serializers.CircuitSerializer
filter_class = CircuitFilter
class CircuitDetailView(generics.RetrieveAPIView):
"""
Retrieve a single circuit
"""
queryset = Circuit.objects.select_related('type', 'provider', 'site', 'interface__device')
serializer_class = serializers.CircuitSerializer
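# A minimal sketch (hypothetical, not part of this module) of how these views could be
# wired into a Django URLconf; the URL patterns and names below are assumptions.
#
#     from django.conf.urls import url
#     from . import views
#
#     urlpatterns = [
#         url(r'^providers/$', views.ProviderListView.as_view(), name='provider_list'),
#         url(r'^providers/(?P<pk>\d+)/$', views.ProviderDetailView.as_view(), name='provider_detail'),
#         url(r'^circuit-types/$', views.CircuitTypeListView.as_view(), name='circuittype_list'),
#         url(r'^circuits/$', views.CircuitListView.as_view(), name='circuit_list'),
#         url(r'^circuits/(?P<pk>\d+)/$', views.CircuitDetailView.as_view(), name='circuit_detail'),
#     ]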
|
#
# Copyright The NOMAD Authors.
#
# This file is part of NOMAD.
# See https://nomad-lab.eu for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import numpy as np
import logging
from nomad.units import ureg
from nomad.parsing.file_parser import TextParser, Quantity, DataTextParser
from nomad.datamodel.metainfo.simulation.run import Run, Program, TimeRun
from nomad.datamodel.metainfo.simulation.method import (
Electronic, Method, XCFunctional, Functional, AtomParameters, DFT,
BasisSet, GW as GWMethod
)
from nomad.datamodel.metainfo.simulation.system import (
System, Atoms
)
from nomad.datamodel.metainfo.simulation.calculation import (
Calculation, BandStructure, BandEnergies, Dos, DosValues,
ScfIteration, Energy, EnergyEntry, Stress, StressEntry, Thermodynamics,
Forces, ForcesEntry
)
from nomad.datamodel.metainfo.workflow import Workflow
from .metainfo.fhi_aims import Run as xsection_run, Method as xsection_method,\
x_fhi_aims_section_parallel_task_assignement, x_fhi_aims_section_parallel_tasks,\
x_fhi_aims_section_controlIn_basis_set, x_fhi_aims_section_controlIn_basis_func,\
x_fhi_aims_section_controlInOut_atom_species, x_fhi_aims_section_controlInOut_basis_func,\
x_fhi_aims_section_vdW_TS
re_float = r'[-+]?\d+\.\d*(?:[Ee][-+]\d+)?'
re_n = r'[\n\r]'
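# For reference: re_float matches decimal floats with an optional sign and an optional
# explicitly signed exponent (e.g. '1.0', '-3.14', '2.5E-03'); re_n matches a newline or
# carriage return character.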
class FHIAimsControlParser(TextParser):
def __init__(self):
super().__init__(None)
@staticmethod
def str_to_unit(val_in):
val = val_in.strip().lower()
unit = None
if val.startswith('a'):
unit = 1 / ureg.angstrom
elif val.startswith('b'):
unit = 1 / ureg.bohr
return unit
def init_quantities(self):
def str_to_species(val_in):
val = val_in.strip().splitlines()
data = []
species = dict()
for v in val:
v = v.strip().split('#')[0]
if not v or not v[0].isalpha():
continue
if v.startswith('species'):
if species:
data.append(species)
species = dict(species=v.split()[1:])
else:
v = v.replace('.d', '.e').split()
vi = v[1] if len(v[1:]) == 1 else v[1:]
if v[0] in species:
species[v[0]].extend([vi])
else:
species[v[0]] = [vi]
data.append(species)
return data
self._quantities = [
Quantity(
xsection_method.x_fhi_aims_controlIn_charge,
rf'{re_n} *charge\s*({re_float})', repeats=False),
Quantity(
xsection_method.x_fhi_aims_controlIn_hse_unit,
rf'{re_n} *hse_unit\s*([\w\-]+)', str_operation=self.str_to_unit, repeats=False),
Quantity(
xsection_method.x_fhi_aims_controlIn_hybrid_xc_coeff,
rf'{re_n} *hybrid_xc_coeff\s*({re_float})', repeats=False),
Quantity(
xsection_run.x_fhi_aims_controlIn_MD_time_step,
rf'{re_n} *MD_time_step\s*({re_float})', repeats=False),
Quantity(
xsection_method.x_fhi_aims_controlIn_k_grid,
rf'{re_n} *k\_grid\s*([\d ]+)', repeats=False),
Quantity(
'occupation_type',
rf'{re_n} *occupation_type\s*([\w\. \-\+]+)', repeats=False),
Quantity(
xsection_method.x_fhi_aims_controlIn_override_relativity,
rf'{re_n} *override_relativity\s*([\.\w]+)', repeats=False),
Quantity(
'relativistic',
rf'{re_n} *relativistic\s*([\w\. \-\+]+)', repeats=False),
Quantity(
xsection_method.x_fhi_aims_controlIn_sc_accuracy_rho,
rf'{re_n} *sc_accuracy_rho\s*({re_float})', repeats=False),
Quantity(
xsection_method.x_fhi_aims_controlIn_sc_accuracy_eev,
rf'{re_n} *sc_accuracy_eev\s*({re_float})', repeats=False),
Quantity(
xsection_method.x_fhi_aims_controlIn_sc_accuracy_etot,
rf'{re_n} *sc_accuracy_etot\s*({re_float})', repeats=False),
Quantity(
xsection_method.x_fhi_aims_controlIn_sc_accuracy_forces,
rf'{re_n} *sc_accuracy_forces\s*({re_float})', repeats=False),
Quantity(
xsection_method.x_fhi_aims_controlIn_sc_accuracy_stress,
rf'{re_n} *sc_accuracy_stress\s*({re_float})', repeats=False),
Quantity(
xsection_method.x_fhi_aims_controlIn_sc_iter_limit,
rf'{re_n} *sc_iter_limit\s*([\d]+)', repeats=False),
Quantity(
xsection_method.x_fhi_aims_controlIn_spin,
rf'{re_n} *spin\s*([\w]+)', repeats=False),
Quantity(
xsection_method.x_fhi_aims_controlIn_verbatim_writeout,
rf'{re_n} *verbatim_writeout\s*([\w]+)', repeats=False),
Quantity(
'xc',
rf'{re_n} *xc\s*([\w\. \-\+]+)', repeats=False),
Quantity(
'species', rf'{re_n} *(species\s*[A-Z][a-z]?[\s\S]+?)'
r'(?:species\s*[A-Z][a-z]?|Completed|\-{10})',
str_operation=str_to_species, repeats=False)]
class FHIAimsOutParser(TextParser):
def __init__(self):
super().__init__(None)
def init_quantities(self):
units_mapping = {'Ha': ureg.hartree, 'eV': ureg.eV}
def str_to_energy_components(val_in):
val = [v.strip() for v in val_in.strip().splitlines()]
res = dict()
for v in val:
v = v.lstrip(' |').strip().split(':')
if len(v) < 2 or not v[1]:
continue
vi = v[1].split()
if not vi[0][-1].isdecimal() or len(vi) < 2:
continue
unit = units_mapping.get(vi[1], None)
res[v[0].strip()] = float(vi[0]) * unit if unit is not None else float(vi[0])
return res
def str_to_scf_convergence(val_in):
res = dict()
for v in val_in.strip().splitlines():
v = v.lstrip(' |').split(':')
if len(v) != 2:
break
vs = v[1].split()
unit = None
if len(vs) > 1:
unit = units_mapping.get(vs[1], None)
res[v[0].strip()] = float(vs[0]) * unit if unit is not None else float(vs[0])
return res
def str_to_atomic_forces(val_in):
val = [v.lstrip(' |').split() for v in val_in.strip().splitlines()]
forces = np.array([v[1:4] for v in val if len(v) == 4], dtype=float)
return forces * ureg.eV / ureg.angstrom
def str_to_dos_files(val_in):
val = [v.strip() for v in val_in.strip().splitlines()]
files = []
species = []
for v in val[1:]:
if v.startswith('| writing') and 'raw data' in v:
files.append(v.split('to file')[1].strip(' .'))
if 'for species' in v:
species.append(v.split('for species')[1].split()[0])
elif not v.startswith('|'):
break
return files, list(set(species))
def str_to_gw_eigs(val_in):
val = [v.split() for v in val_in.splitlines()]
keys = val[0]
data = []
for v in val[1:]:
if len(keys) == len(v) and v[0].isdecimal():
data.append(v)
data = np.array(data, dtype=float)
data = np.transpose(data)
res = {keys[i]: data[i] for i in range(len(data))}
return res
def str_to_gw_scf(val_in):
val = [v.split(':') for v in val_in.splitlines()]
data = {}
for v in val:
if len(v) == 2:
data[v[0].strip(' |')] = float(v[1].split()[0]) * ureg.eV
if 'Fit accuracy for G' in v[0]:
data['Fit accuracy for G(w)'] = float(v[0].split()[-1])
return data
def str_to_array_size_parameters(val_in):
val = [v.lstrip(' |').split(':') for v in val_in.strip().splitlines()]
return {v[0].strip(): int(v[1]) for v in val if len(v) == 2}
def str_to_species_in(val_in):
val = [v.strip() for v in val_in.splitlines()]
data = []
species = dict()
for i in range(len(val)):
if val[i].startswith('Reading configuration options for species'):
if species:
data.append(species)
species = dict(species=val[i].split('species')[1].split()[0])
elif not val[i].startswith('| Found'):
continue
val[i] = val[i].split(':')
if len(val[i]) < 2:
continue
k = val[i][0].split('Found')[1].strip()
v = val[i][1].replace(',', '').split()
if 'Gaussian basis function' in k and 'elementary' in v:
n_gaussians = int(v[v.index('elementary') - 1])
for j in range(n_gaussians):
v.extend(val[i + j + 1].lstrip('|').split())
v = v[0] if len(v) == 1 else v
if k in species:
species[k].extend([v])
else:
species[k] = [v]
data.append(species)
return data
def str_to_species(val_in):
data = dict()
val = [v.strip() for v in val_in.splitlines()]
for i in range(len(val)):
if val[i].startswith('species'):
data['species'] = val[i].split()[1]
elif not val[i].startswith('| Found'):
continue
val[i] = val[i].split(':')
if len(val[i]) < 2:
continue
k = val[i][0].split('Found')[1].strip()
v = val[i][1].replace(',', '').split()
if 'Gaussian basis function' in k and 'elementary' in v:
n_gaussians = int(v[v.index('elementary') - 1])
for j in range(n_gaussians):
v.extend(val[i + j + 1].lstrip('|').split())
v = v[0] if len(v) == 1 else v
if k in data:
data[k].extend([v])
else:
data[k] = [v]
return data
structure_quantities = [
Quantity(
'labels',
rf'(?:Species\s*([A-Z][a-z]*)|([A-Z][a-z]*)\w*{re_n})', repeats=True),
Quantity(
'positions',
rf'({re_float})\s+({re_float})\s+({re_float}) *{re_n}',
dtype=np.dtype(np.float64), repeats=True),
Quantity(
'positions',
rf'atom +({re_float})\s+({re_float})\s+({re_float})',
dtype=np.dtype(np.float64), repeats=True),
Quantity(
'velocities',
rf'velocity\s+({re_float})\s+({re_float})\s+({re_float})',
dtype=np.dtype(np.float64), repeats=True)]
eigenvalues = Quantity(
'eigenvalues',
rf'Writing Kohn\-Sham eigenvalues\.([\s\S]+?State[\s\S]+?)(?:{re_n}{re_n} +[A-RT-Z])',
repeats=True, sub_parser=TextParser(quantities=[
Quantity(
'kpoints',
rf'{re_n} *K-point:\s*\d+ at\s*({re_float})\s*({re_float})\s*({re_float})',
dtype=float, repeats=True),
Quantity(
'occupation_eigenvalue',
rf'{re_n} *\d+\s*({re_float})\s*({re_float})\s*{re_float}',
repeats=True)]))
scf_quantities = [
Quantity(
'date_time', rf'{re_n} *Date\s*:(\s*\d+), Time\s*:(\s*[\d\.]+)\s*',
repeats=False, convert=False),
# TODO add section_eigenvalues to scf_iteration
eigenvalues,
Quantity(
'energy_components',
rf'{re_n} *Total energy components:([\s\S]+?)((?:{re_n}{re_n}|\| Electronic free energy per atom\s*:\s*[Ee\d\.\-]+ eV))',
repeats=False, str_operation=str_to_energy_components, convert=False),
Quantity(
'forces', rf'{re_n} *Total forces\([\s\d]+\)\s*:([\s\d\.\-\+Ee]+){re_n}', repeats=True),
Quantity(
'stress_tensor', rf'{re_n} *Sum of all contributions\s*:\s*([\d\.\-\+Ee ]+{re_n})', repeats=False),
Quantity(
'pressure', r' *\|\s*Pressure:\s*([\d\.\-\+Ee ]+)', repeats=False),
Quantity(
'scf_convergence',
rf'{re_n} *Self-consistency convergence accuracy:([\s\S]+?)(\| Change of total energy\s*:\s*[\d\.\-\+Ee V]+)',
repeats=False, str_operation=str_to_scf_convergence, convert=False),
Quantity(
'humo', r'Highest occupied state \(VBM\) at\s*([\d\.\-\+Ee ]+) (?P<__unit>\w+)',
repeats=False, dtype=float),
Quantity(
'lumo', r'Lowest unoccupied state \(CBM\) at\s*([\d\.\-\+Ee ]+) (?P<__unit>\w+)',
repeats=False, dtype=float),
Quantity(
'fermi_level', rf'{re_n} *\| Chemical potential \(Fermi level\) in (?P<__unit>\w+)\s*:([\d\.\-\+Ee ]+)',
repeats=False, dtype=float)]
def str_to_scf_convergence2(val_in):
val = val_in.split('|')
if len(val) != 7:
return
energy = float(val[3]) * ureg.eV
return {'Change of total energy': energy}
def str_to_hirshfeld(val_in):
val = [v.strip() for v in val_in.strip().splitlines()]
data = dict(atom=val[0])
for v in val[1:]:
if v.startswith('|'):
v = v.strip(' |').split(':')
if v[0][0].isalpha():
key = v[0].strip()
data[key] = []
data[key].extend([float(vi) for vi in v[-1].split()])
return data
calculation_quantities = [
Quantity(
'self_consistency',
r'Begin self\-consistency iteration #\s*\d+([\s\S]+?Total energy evaluation[s:\d\. ]+)',
repeats=True, sub_parser=TextParser(quantities=scf_quantities)),
# different format for scf loop
Quantity(
'self_consistency',
rf'{re_n} *SCF\s*\d+\s*:([ \|\-\+Ee\d\.s]+)', repeats=True,
sub_parser=TextParser(quantities=[Quantity(
'scf_convergence', r'([\s\S]+)', str_operation=str_to_scf_convergence2,
repeats=False, convert=False)])),
Quantity(
'gw_self_consistency', r'GW Total Energy Calculation([\s\S]+?)\-{5}',
repeats=True, str_operation=str_to_gw_scf, convert=False),
Quantity(
'date_time', rf'{re_n} *Date\s*:(\s*\d+), Time\s*:(\s*[\d\.]+)\s*', repeats=False,
convert=False),
Quantity(
'structure',
rf'Atomic structure.*:\s+.*x \[A\]\s*y \[A\]\s*z \[A\]([\s\S]+?Species[\s\S]+?(?:{re_n} *{re_n}| 1\: ))',
repeats=False, convert=False, sub_parser=TextParser(quantities=structure_quantities)),
Quantity(
'structure',
rf'{re_n} *(atom +{re_float}[\s\S]+?(?:{re_n} *{re_n}|\-\-\-))',
repeats=False, convert=False, sub_parser=TextParser(quantities=structure_quantities)),
Quantity(
'lattice_vectors',
rf'{re_n} *lattice_vector([\d\.\- ]+){re_n} *lattice_vector([\d\.\- ]+){re_n} *lattice_vector([\d\.\- ]+)',
unit='angstrom', repeats=False, shape=(3, 3), dtype=float),
Quantity(
'energy',
rf'{re_n} *Energy and forces in a compact form:([\s\S]+?(?:{re_n}{re_n}|Electronic free energy\s*:\s*[\d\.\-Ee]+ eV))',
str_operation=str_to_energy_components, repeats=False, convert=False),
# in some cases, the energy components are also printed after a calculation
# in the same format as in the scf iteration; they are also printed during
# initialization, so we should get the last occurrence
Quantity(
'energy_components',
rf'{re_n} *Total energy components:([\s\S]+?)((?:{re_n}{re_n}|\| Electronic free energy per atom\s*:\s*[\d\.\-Ee]+ eV))',
repeats=True, str_operation=str_to_energy_components, convert=False),
Quantity(
'energy_xc',
rf'{re_n} *Start decomposition of the XC Energy([\s\S]+?)End decomposition of the XC Energy',
str_operation=str_to_energy_components, repeats=False, convert=False),
eigenvalues,
Quantity(
'forces', rf'{re_n} *Total atomic forces.*?\[eV/Ang\]:\s*([\d\.Ee\-\+\s\|]+)',
str_operation=str_to_atomic_forces, repeats=False, convert=False),
# TODO no metainfo for scf forces but old parser put it in atom_forces_free_raw
Quantity(
'forces_raw', rf'{re_n} *Total forces\([\s\d]+\)\s*:([\s\d\.\-\+Ee]+){re_n}', repeats=True,
dtype=float),
Quantity(
'time_force_evaluation',
rf'{re_n} *\| Time for this force evaluation\s*:\s*([\d\.]+) s\s*([\d\.]+) s',
repeats=False, dtype=float),
Quantity(
'total_dos_files', r'Calculating total density of states([\s\S]+?)\-{5}',
str_operation=str_to_dos_files, repeats=False, convert=False),
Quantity(
'atom_projected_dos_files', r'Calculating atom\-projected density of states([\s\S]+?)\-{5}',
str_operation=str_to_dos_files, repeats=False, convert=False),
Quantity(
'species_projected_dos_files', r'Calculating angular momentum projected density of states([\s\S]+?)\-{5}',
str_operation=str_to_dos_files, repeats=False, convert=False),
Quantity(
'gw_eigenvalues', r'(state\s*occ_num\s*e_gs[\s\S]+?)\s*\| Total time',
str_operation=str_to_gw_eigs, repeats=False, convert=False),
Quantity(
'vdW_TS',
rf'(Evaluating non\-empirical van der Waals correction[\s\S]+?)(?:\|\s*Converged\.|\-{5}{re_n}{re_n})',
repeats=False, sub_parser=TextParser(quantities=[
Quantity(
'kind', r'Evaluating non\-empirical van der Waals correction \(([\w /]+)\)',
repeats=False, convert=False, flatten=False),
Quantity(
'atom_hirshfeld', r'\| Atom\s*\d+:([\s\S]+?)\-{5}',
str_operation=str_to_hirshfeld, repeats=True, convert=False)])),
Quantity(
'converged', r'Self\-consistency cycle (converged)\.', repeats=False, dtype=str)]
tail = '|'.join([
r'Time for this force evaluation\s*:\s*[s \d\.]+',
r'Final output of selected total energy values',
r'No geometry change',
r'Leaving FHI\-aims',
r'\Z'])
self._quantities = [
Quantity(
Program.version, r'(?:Version|FHI\-aims version)\s*\:*\s*([\d\.]+)\s*',
repeats=False),
Quantity(
xsection_run.x_fhi_aims_program_compilation_date, r'Compiled on ([\d\/]+)',
repeats=False),
Quantity(
xsection_run.x_fhi_aims_program_compilation_time, r'at (\d+\:\d+\:\d+)',
repeats=False),
Quantity(
Program.compilation_host, r'on host ([\w\.\-]+)',
repeats=False),
Quantity(
xsection_run.x_fhi_aims_program_execution_date, r'Date\s*:\s*([0-9]+)',
repeats=False),
Quantity(
xsection_run.x_fhi_aims_program_execution_time, rf'Time\s*:\s*([0-9\.]+){re_n}',
repeats=False),
Quantity(
TimeRun.cpu1_start,
r'Time zero on CPU 1\s*:\s*([0-9\-E\.]+)\s*(?P<__unit>\w+)\.',
repeats=False),
Quantity(
TimeRun.wall_start,
r'Internal wall clock time zero\s*:\s*([0-9\-E\.]+)\s*(?P<__unit>\w+)\.',
repeats=False),
Quantity(
Run.raw_id, r'aims_uuid\s*:\s*([\w\-]+)', repeats=False),
Quantity(
xsection_run.x_fhi_aims_number_of_tasks, r'Using\s*(\d+)\s*parallel tasks',
repeats=False),
Quantity(
x_fhi_aims_section_parallel_task_assignement.x_fhi_aims_parallel_task_nr,
r'Task\s*(\d+)\s*on host', repeats=True),
Quantity(
x_fhi_aims_section_parallel_task_assignement.x_fhi_aims_parallel_task_host,
r'Task\s*\d+\s*on host\s*([\s\S]+?)reporting', repeats=True, flatten=False),
Quantity(
'fhi_aims_files', r'(?:FHI\-aims file:|Parsing)\s*([\w\/\.]+)', repeats=True),
Quantity(
'array_size_parameters',
r'Basic array size parameters:\s*([\|:\s\w\.\/]+:\s*\d+)', repeats=False,
str_operation=str_to_array_size_parameters, convert=False),
Quantity(
xsection_method.x_fhi_aims_controlInOut_hse_unit,
r'hse_unit: Unit for the HSE06 hybrid functional screening parameter set to\s*(\w)',
str_operation=FHIAimsControlParser.str_to_unit, repeats=False),
Quantity(
xsection_method.x_fhi_aims_controlInOut_hybrid_xc_coeff,
r'hybrid_xc_coeff: Mixing coefficient for hybrid-functional exact exchange modified to\s*([\d\.]+)',
repeats=False),
Quantity(
xsection_method.x_fhi_aims_controlInOut_k_grid,
rf'{re_n} *Found k-point grid:\s*([\d ]+)', repeats=False),
Quantity(
xsection_run.x_fhi_aims_controlInOut_MD_time_step,
rf'{re_n} *Molecular dynamics time step\s*=\s*([\d\.]+)\s*(?P<__unit>[\w]+)',
repeats=False),
Quantity(
xsection_method.x_fhi_aims_controlInOut_relativistic,
rf'{re_n} *Scalar relativistic treatment of kinetic energy:\s*([\w\- ]+)',
repeats=False),
Quantity(
xsection_method.x_fhi_aims_controlInOut_relativistic,
rf'{re_n} *(Non-relativistic) treatment of kinetic energy',
repeats=False),
Quantity(
xsection_method.x_fhi_aims_controlInOut_relativistic_threshold,
rf'{re_n} *Threshold value for ZORA:\s*([\d\.Ee\-\+])', repeats=False),
Quantity(
xsection_method.x_fhi_aims_controlInOut_xc,
rf'{re_n} *XC:\s*(?:Using)*\s*([\w\- ]+) with OMEGA =\s*([\d\.Ee\-\+]+)',
repeats=False, dtype=None),
Quantity(
xsection_method.x_fhi_aims_controlInOut_xc,
r'XC: (?:Running|Using) ([\-\w \(\) ]+)', repeats=False),
Quantity(
xsection_method.x_fhi_aims_controlInOut_xc,
rf'{re_n} *(Hartree-Fock) calculation starts \.\.\.', repeats=False),
Quantity(
'band_segment_points',
r'Plot band\s*\d+\s*\|\s*begin[ \d\.\-]+\s*\|\s*end[ \d\.\-]+\s*\|\s*number of points:\s*(\d+)',
repeats=True),
Quantity(
'species',
rf'(Reading configuration options for species [\s\S]+?)(?:{re_n} *Finished|{re_n} *{re_n})',
str_operation=str_to_species_in, repeats=False),
Quantity(
'control_inout',
rf'{re_n} *Reading file control\.in\.\s*\-*\s*([\s\S]+?)'
r'(?:Finished reading input file \'control\.in\'|Input file control\.in ends\.)', repeats=False,
sub_parser=TextParser(quantities=[
Quantity(
'species', r'Reading configuration options for (species[\s\S]+?)grid points\.',
repeats=True, str_operation=str_to_species)])),
# assign the initial geometry to full scf as no change in structure is done
# during the initial scf step
Quantity(
'lattice_vectors',
r'Input geometry:\s*\|\s*Unit cell:\s*'
r'\s*\|\s*([\d\.\-\+eE\s]+)\s*\|\s*([\d\.\-\+eE\s]+)\s*\|\s*([\d\.\-\+eE\s]+)',
repeats=False, unit='angstrom', shape=(3, 3), dtype=float),
Quantity(
'structure',
rf'Atomic structure.*:\s+.*x \[A\]\s*y \[A\]\s*z \[A\]([\s\S]+?Species[\s\S]+?(?:{re_n} *{re_n}| 1\: ))',
repeats=False, convert=False, sub_parser=TextParser(quantities=structure_quantities)),
Quantity(
'full_scf',
r'Begin self-consistency loop: Initialization'
rf'([\s\S]+?(?:{tail}))',
repeats=True, sub_parser=TextParser(quantities=calculation_quantities)),
Quantity(
'geometry_optimization',
rf'{re_n} *Geometry optimization: Attempting to predict improved coordinates\.'
rf'([\s\S]+?(?:{tail}))',
repeats=True, sub_parser=TextParser(quantities=calculation_quantities)),
Quantity(
'molecular_dynamics',
rf'{re_n} *Molecular dynamics: Attempting to update all nuclear coordinates\.'
rf'([\s\S]+?(?:{tail}))',
repeats=True, sub_parser=TextParser(quantities=calculation_quantities))]
# TODO add SOC perturbed eigs, dielectric function
def get_number_of_spin_channels(self):
return self.get('array_size_parameters', {}).get('Number of spin channels', 1)
def get_calculation_type(self):
calculation_type = 'single_point'
if self.get('geometry_optimization') is not None:
calculation_type = 'geometry_optimization'
elif self.get('molecular_dynamics', None) is not None:
calculation_type = 'molecular_dynamics'
return calculation_type
class FHIAimsParser:
def __init__(self):
self.out_parser = FHIAimsOutParser()
self.control_parser = FHIAimsControlParser()
self.dos_parser = DataTextParser()
self.bandstructure_parser = DataTextParser()
self._xc_map = {
'Perdew-Wang parametrisation of Ceperley-Alder LDA': [
{'name': 'LDA_C_PW'}, {'name': 'LDA_X'}],
'Perdew-Zunger parametrisation of Ceperley-Alder LDA': [
{'name': 'LDA_C_PZ'}, {'name': 'LDA_X'}],
'VWN-LDA parametrisation of VWN5 form': [
{'name': 'LDA_C_VWN'}, {'name': 'LDA_X'}],
'VWN-LDA parametrisation of VWN-RPA form': [
{'name': 'LDA_C_VWN_RPA'}, {'name': 'LDA_X'}],
'AM05 gradient-corrected functionals': [
{'name': 'GGA_C_AM05'}, {'name': 'GGA_X_AM05'}],
'BLYP functional': [{'name': 'GGA_C_LYP'}, {'name': 'GGA_X_B88'}],
'PBE gradient-corrected functionals': [
{'name': 'GGA_C_PBE'}, {'name': 'GGA_X_PBE'}],
'PBEint gradient-corrected functional': [
{'name': 'GGA_C_PBEINT'}, {'name': 'GGA_X_PBEINT'}],
'PBEsol gradient-corrected functionals': [
{'name': 'GGA_C_PBE_SOL'}, {'name': 'GGA_X_PBE_SOL'}],
'RPBE gradient-corrected functionals': [
{'name': 'GGA_C_PBE'}, {'name': 'GGA_X_RPBE'}],
'revPBE gradient-corrected functionals': [
{'name': 'GGA_C_PBE'}, {'name': 'GGA_X_PBE_R'}],
'PW91 gradient-corrected functionals': [
{'name': 'GGA_C_PW91'}, {'name': 'GGA_X_PW91'}],
'M06-L gradient-corrected functionals': [
{'name': 'MGGA_C_M06_L'}, {'name': 'MGGA_X_M06_L'}],
'M11-L gradient-corrected functionals': [
{'name': 'MGGA_C_M11_L'}, {'name': 'MGGA_X_M11_L'}],
'TPSS gradient-corrected functionals': [
{'name': 'MGGA_C_TPSS'}, {'name': 'MGGA_X_TPSS'}],
'TPSSloc gradient-corrected functionals': [
{'name': 'MGGA_C_TPSSLOC'}, {'name': 'MGGA_X_TPSS'}],
'hybrid B3LYP functional': [
{'name': 'HYB_GGA_XC_B3LYP5'}],
'Hartree-Fock': [{'name': 'HF_X'}],
'HSE': [{'name': 'HYB_GGA_XC_HSE03'}],
'HSE-functional': [{'name': 'HYB_GGA_XC_HSE06'}],
'hybrid-PBE0 functionals': [
{'name': 'GGA_C_PBE'}, {
'name': 'GGA_X_PBE', 'weight': lambda x: 0.75 if x is None else 1.0 - x},
{'name': 'HF_X', 'weight': lambda x: 0.25 if x is None else x}],
'hybrid-PBEsol0 functionals': [
{'name': 'GGA_C_PBE_SOL'}, {
'name': 'GGA_X_PBE_SOL', 'weight': lambda x: 0.75 if x is None else 1.0 - x},
{'name': 'HF_X', 'weight': lambda x: 0.25 if x is None else x}],
'Hybrid M06 gradient-corrected functionals': [{'name': 'HYB_MGGA_XC_M06'}],
'Hybrid M06-2X gradient-corrected functionals': [
{'name': 'HYB_MGGA_XC_M06_2X'}],
'Hybrid M06-HF gradient-corrected functionals': [
{'name': 'HYB_MGGA_XC_M06_HF'}],
'Hybrid M08-HX gradient-corrected functionals': [
{'name': 'HYB_MGGA_XC_M08_HX'}],
'Hybrid M08-SO gradient-corrected functionals': [
{'name': 'HYB_MGGA_XC_M08_SO'}],
'Hybrid M11 gradient-corrected functionals': [{'name': 'HYB_MGGA_XC_M11'}]}
# TODO update metainfo to reflect all energy corrections
# why section_vdW_TS under x_fhi_aims_section_controlInOut_atom_species?
self._energy_map = {
'Total energy uncorrected': 'energy_total',
'Total energy corrected': 'energy_total_t0',
'Electronic free energy': 'energy_free',
'X Energy': 'energy_exchange',
'C Energy GGA': 'energy_correlation',
'Total XC Energy': 'energy_xc',
'X Energy LDA': 'x_fhi_aims_energy_X_LDA',
'C Energy LDA': 'x_fhi_aims_energy_C_LDA',
'Sum of eigenvalues': 'energy_sum_eigenvalues',
'XC energy correction': 'energy_correction_xc',
'XC potential correction': 'energy_xc_potential',
'Free-atom electrostatic energy': 'x_fhi_aims_energy_electrostatic_free_atom',
'Hartree energy correction': 'energy_correction_hartree',
'vdW energy correction': 'energy_van_der_waals',
'Entropy correction': 'energy_correction_entropy',
'Total energy': 'energy_total',
'Total energy, T -> 0': 'energy_total_t0',
'Kinetic energy': 'energy_kinetic_electronic',
'Electrostatic energy': 'energy_electrostatic',
'error in Hartree potential': 'energy_correction_hartree',
'Sum of eigenvalues per atom': 'energy_sum_eigenvalues_per_atom',
'Total energy (T->0) per atom': 'energy_total_t0_per_atom',
'Electronic free energy per atom': 'energy_free_per_atom',
'Hartree-Fock part': 'energy_hartree_fock_x_scaled',
# GW
'Galitskii-Migdal Total Energy': 'x_fhi_aims_scgw_galitskii_migdal_total_energy',
'GW Kinetic Energy': 'x_fhi_aims_scgw_kinetic_energy',
'Hartree energy from GW density': 'x_fhi_aims_scgw_hartree_energy_sum_eigenvalues',
'GW correlation Energy': 'x_fhi_aims_energy_scgw_correlation_energy',
'RPA correlation Energy': 'x_fhi_aims_scgw_rpa_correlation_energy',
'Sigle Particle Energy': 'x_fhi_aims_single_particle_energy',
'Fit accuracy for G(w)': 'x_fhi_aims_poles_fit_accuracy',
# Convergence
'Change of total energy': 'energy_change',
}
self._relativity_map = {
'Non-relativistic': None,
'ZORA': 'scalar_relativistic',
'on-site free-atom approximation to ZORA': 'scalar_relativistic_atomic_ZORA'
}
self._property_map = {
'atom': 'x_fhi_aims_atom_type_vdW',
'Free atom volume': 'x_fhi_aims_free_atom_volume',
'Hirshfeld charge': 'x_fhi_aims_hirschfeld_charge',
'Hirshfeld volume': 'x_fhi_aims_hirschfeld_volume'
}
def get_fhiaims_file(self, default):
base, *ext = default.split('.')
ext = '.'.join(ext)
base = base.lower()
files = os.listdir(self.maindir)
files = [os.path.basename(f) for f in files]
files = [os.path.join(
self.maindir, f) for f in files if base.lower() in f.lower() and f.endswith(ext)]
files.sort()
return files
def parse_configurations(self):
sec_run = self.archive.run[-1]
def parse_bandstructure():
band_segments_points = self.out_parser.get('band_segment_points')
if band_segments_points is None:
return
# band structure, unlike dos, is not a property of a section_scc but of
# the whole run. dos output file is contained in a section
sec_scc = sec_run.calculation[-1]
# get the fermi energy for this SCC: if it is not found, the band
# structure cannot be reported.
energy_fermi = sec_scc.energy.fermi
if energy_fermi is None:
return
energy_fermi_ev = energy_fermi.to(ureg.electron_volt).magnitude
sec_k_band = sec_scc.m_create(BandStructure, Calculation.band_structure_electronic)
sec_k_band.energy_fermi = energy_fermi
nspin = self.out_parser.get_number_of_spin_channels()
nbands = None
for n in range(len(band_segments_points)):
bandstructure_files = [os.path.join(
self.out_parser.maindir, 'band%d%03d.out' % (s + 1, n + 1)) for s in range(nspin)]
data = []
for band_file in bandstructure_files:
self.bandstructure_parser.mainfile = band_file
if self.bandstructure_parser.data is None:
break
data.append(self.bandstructure_parser.data)
if len(data) == 0:
continue
data = np.transpose(data)
eigs = (np.transpose(
data[5::2]) + energy_fermi_ev) * ureg.eV
nbands = np.shape(eigs)[-1] if n == 0 else nbands if nbands is not None else np.shape(eigs)[-1]
if nbands != np.shape(eigs)[-1]:
self.logger.warn('Inconsistent number of bands found in bandstructure data.')
continue
sec_k_band_segment = sec_k_band.m_create(BandEnergies)
sec_k_band_segment.kpoints = np.transpose(data[1:4])[0]
occs = np.transpose(data[4::2])
# the band energies stored in the band*.out files have already
# been shifted to the fermi energy. This shift is undone so
# that the energy scales for energy_reference_fermi, the band
# energies and the DOS energies match.
sec_k_band_segment.energies = eigs
sec_k_band_segment.occupations = occs
def read_dos(dos_file):
dos_file = self.get_fhiaims_file(dos_file)
if not dos_file:
return
self.dos_parser.mainfile = dos_file[0]
if self.dos_parser.data is None:
return
return np.transpose(self.dos_parser.data)
def read_projected_dos(dos_files):
dos = []
dos_dn = []
n_atoms = 0
l_max = []
for dos_file in dos_files:
data = read_dos(dos_file)
if data is None or np.size(data) == 0:
continue
# we first read the spin_dn just to make sure the spin dn component exists
if 'spin_up' in dos_file:
data_dn = read_dos(dos_file.replace('spin_up', 'spin_dn'))
if data_dn is None:
continue
# the maximum l is arbitrary for different species, so we can't stack
dos_dn.append(data_dn[1:])
l_max.append(len(data[1:]))
# column 0 is energy column 1 is total
energy = data[0]
dos.append(data[1:])
n_atoms += 1
if not dos:
return None, None
energies = energy * ureg.eV
# we cut the l components up to the minimum (or pad zeros?)
n_l = min(l_max)
n_spin = 2 if dos_dn else 1
dos = [d[:n_l] for d in dos]
if dos_dn:
dos_dn = [d[:n_l] for d in dos_dn]
dos = [dos, dos_dn]
dos = np.transpose(np.reshape(
dos, (n_spin, n_atoms, n_l, len(energies))), axes=(2, 0, 1, 3))
dos = dos / ureg.eV
return energies, dos
def parse_dos(section):
sec_scc = sec_run.calculation[-1]
sec_dos = None
energies = None
lattice_vectors = section.get(
'lattice_vectors', self.out_parser.get('lattice_vectors'))
if lattice_vectors is None:
lattice_vectors = np.eye(3) * ureg.angstrom
volume = np.abs(np.linalg.det(lattice_vectors.magnitude)) * ureg.angstrom ** 3
n_spin = self.out_parser.get_number_of_spin_channels()
# parse total first, we expect only one file
total_dos_files, _ = section.get('total_dos_files', [['KS_DOS_total_raw.dat'], []])
for dos_file in total_dos_files:
data = read_dos(dos_file)
if data is None or np.size(data) == 0:
continue
sec_dos = sec_scc.m_create(Dos, Calculation.dos_electronic)
energies = data[0] * ureg.eV
sec_dos.n_energies = len(energies)
sec_dos.energies = energies
# dos unit is 1/(eV-cell volume)
dos = data[1: n_spin + 1] * (1 / ureg.eV) * volume.to('m**3').magnitude
for spin in range(len(dos)):
sec_dos_values = sec_dos.m_create(DosValues, Dos.total)
sec_dos_values.spin = spin
sec_dos_values.value = dos[spin]
# parse projected
# projected dos for different spins are on separate files
# we only include spin_up, add spin_dn in loop
for projection_type in ['atom', 'species']:
proj_dos_files, species = section.get(
'%s_projected_dos_files' % projection_type, [[], []])
proj_dos_files = [f for f in proj_dos_files if 'spin_dn' not in f]
if proj_dos_files:
if sec_dos is None:
sec_dos = sec_scc.m_create(Dos, Calculation.dos_electronic)
energies, dos = read_projected_dos(proj_dos_files)
if dos is None:
continue
sec_def = Dos.atom_projected if projection_type == 'atom' else Dos.species_projected
n_l = len(dos[1:])
lm_values = np.column_stack((np.arange(n_l), np.zeros(n_l, dtype=np.int32)))
for lm in range(len(dos)):
for spin in range(len(dos[lm])):
for atom in range(len(dos[lm][spin])):
sec_dos_values = sec_dos.m_create(DosValues, sec_def)
sec_dos.m_kind = 'integrated'
if lm > 0:
# the first one is total so no lm label
sec_dos_values.lm = lm_values[lm - 1]
sec_dos_values.spin = spin
if projection_type == 'atom':
sec_dos_values.atom_index = atom
else:
sec_dos_values.atom_label = species[atom]
sec_dos_values.value = dos[lm][spin][atom]
def get_eigenvalues(section):
data = section.get('eigenvalues', [None])[-1]
if data is None:
return
n_spin = self.out_parser.get_number_of_spin_channels()
kpts = data.get('kpoints', [np.zeros(3)] * n_spin)
if len(kpts) % n_spin != 0:
self.logger.warn('Inconsistent number of spin channels found.')
n_spin -= 1
kpts = np.reshape(kpts, (len(kpts) // n_spin, n_spin, 3))
kpts = np.transpose(kpts, axes=(1, 0, 2))[0]
kpts = None if len(kpts) == 0 else kpts
occs_eigs = data.get('occupation_eigenvalue')
n_eigs = len(occs_eigs) // (len(kpts) * n_spin)
occs_eigs = np.transpose(
np.reshape(occs_eigs, (len(kpts), n_spin, n_eigs, 2)), axes=(3, 1, 0, 2))
return kpts, occs_eigs[1] * ureg.hartree, occs_eigs[0]
def parse_scf(iteration):
sec_scc = sec_run.calculation[-1]
sec_scf = sec_scc.m_create(ScfIteration)
date_time = iteration.get('date_time')
if date_time is not None:
sec_scf.x_fhi_aims_scf_date_start = date_time[0]
sec_scf.x_fhi_aims_scf_time_start = date_time[1]
sec_energy = sec_scf.m_create(Energy)
energies = iteration.get('energy_components', {})
convergence = iteration.get('scf_convergence', {})
energies.update(convergence)
for key, val in energies.items():
metainfo_key = self._energy_map.get(key, None)
if metainfo_key is not None:
if metainfo_key == 'energy_change':
sec_energy.change = val
elif metainfo_key.startswith('energy_') and not metainfo_key.endswith('per_atom'):
try:
setattr(sec_energy, metainfo_key.replace('energy_', ''), EnergyEntry(
value=val, value_per_atom=energies.get('%s_per_atom' % metainfo_key)))
except Exception:
self.logger.warn('Error setting scf energy metainfo.', data=dict(key=metainfo_key))
else:
try:
setattr(sec_scf, metainfo_key, val)
except Exception:
self.logger.warn('Error setting scf energy metainfo.', data=dict(key=metainfo_key))
if iteration.get('fermi_level') is not None:
sec_energy.fermi = iteration.get('fermi_level')
# eigenvalues scf iteration
eigenvalues = get_eigenvalues(iteration)
if eigenvalues is not None:
sec_eigenvalues = sec_scf.m_create(BandEnergies)
if eigenvalues[0] is not None:
sec_eigenvalues.kpoints = eigenvalues[0]
sec_eigenvalues.energies = eigenvalues[1]
sec_eigenvalues.occupations = eigenvalues[2]
# stress tensor
stress_tensor = iteration.get('stress_tensor')
if stress_tensor is not None:
sec_stress = sec_scf.m_create(Stress)
sec_stress.total = StressEntry(value=stress_tensor)
# pressure
pressure = iteration.get('pressure')
if pressure is not None:
sec_thermo = sec_scf.m_create(Thermodynamics)
sec_thermo.pressure = pressure
def parse_gw(section):
gw_scf_energies = section.get('gw_self_consistency')
gw_eigenvalues = section.get('gw_eigenvalues')
if gw_scf_energies is None and gw_eigenvalues is None:
return
sec_scc = sec_run.m_create(Calculation)
sec_scf_iteration = sec_scc.m_create(ScfIteration)
if gw_scf_energies is not None:
for energies in gw_scf_energies:
sec_scf_iteration = sec_scc.m_create(ScfIteration)
for key, val in energies.items():
metainfo_key = self._energy_map.get(key, None)
if metainfo_key is not None:
try:
setattr(sec_scf_iteration, metainfo_key, val)
except Exception:
self.logger.warn('Error setting gw metainfo.', data=dict(key=metainfo_key))
self._electronic_structure_method = 'scGW' if len(gw_scf_energies) > 1 else 'G0W0'
metainfo_map = {
'occ_num': 'occupations', 'e_gs': 'value_ks', 'e_x^ex': 'value_exchange',
'e_xc^gs': 'value_ks_xc', 'e_c^nloc': 'value_correlation', 'e_qp': 'value_qp'}
if gw_eigenvalues is not None:
sec_eigs_gw = sec_scc.m_create(BandEnergies)
for key, name in metainfo_map.items():
# TODO verify shape of eigenvalues
val = gw_eigenvalues[key] if key == 'occ_num' else gw_eigenvalues[key] * ureg.eV
setattr(sec_eigs_gw, name, np.reshape(val, (1, 1, len(val))))
def parse_vdW(section):
# these are not actually vdW outputs but vdW control parameters but are
# printed within the calculation section.
# TODO why is x_fhi_aims_section_vdW_TS under x_fhi_aims_section_controlInOut_atom_species
# we would then have to split the vdW parameters by species
atoms = section.get('vdW_TS', {}).get('atom_hirshfeld', [])
if not atoms:
return
# get species from section_atom_type
sec_atom_type = sec_run.method[-1].atom_parameters
if not sec_atom_type:
return
for sec in sec_atom_type:
for atom in atoms:
if sec.label == atom['atom']:
sec_vdW_ts = sec.x_fhi_aims_section_controlInOut_atom_species[-1].m_create(
x_fhi_aims_section_vdW_TS)
for key, val in atom.items():
metainfo_name = self._property_map.get(key, None)
if metainfo_name is None:
continue
val = val[0] if len(val) == 1 else val
try:
setattr(sec_vdW_ts, metainfo_name, val)
except Exception:
self.logger.warn('Error setting vdW metainfo.', data=dict(key=metainfo_name))
# TODO add the remaining properties
self._electronic_structure_method = 'DFT'
sec_run.method[-1].electronic.van_der_waals_method = 'TS'
def parse_section(section):
lattice_vectors = section.get(
'lattice_vectors', self.out_parser.get('lattice_vectors'))
structure = section.get(
'structure', self.out_parser.get('structure'))
pbc = [lattice_vectors is not None] * 3
if structure is None:
return
sec_system = sec_run.m_create(System)
sec_atoms = sec_system.m_create(Atoms)
if lattice_vectors is not None:
sec_atoms.lattice_vectors = lattice_vectors
sec_atoms.periodic = pbc
sec_atoms.labels = structure.get('labels')
sec_atoms.positions = structure.get('positions') * ureg.angstrom
velocities = structure.get('velocities')
if velocities is not None:
sec_atoms.velocities = velocities * ureg.angstrom / ureg.ps
sec_scc = sec_run.m_create(Calculation)
sec_scc.system_ref = sec_system
sec_energy = sec_scc.m_create(Energy)
energy = section.get('energy', {})
energy.update(section.get('energy_components', [{}])[-1])
energy.update(section.get('energy_xc', {}))
for key, val in energy.items():
metainfo_key = self._energy_map.get(key, None)
if metainfo_key is None:
continue
elif key == 'vdW energy correction':
kind = section.get('vdW_TS', {}).get('kind', 'Tkatchenko/Scheffler 2009')
sec_energy.van_der_waals = EnergyEntry(value=val, kind=kind)
elif metainfo_key.startswith('x_fhi_aims_energy'):
setattr(sec_scc, metainfo_key, val)
elif metainfo_key.startswith('energy_') and not metainfo_key.endswith('per_atom'):
try:
setattr(sec_energy, metainfo_key.replace('energy_', ''), EnergyEntry(
value=val, value_per_atom=energy.get('%s_per_atom' % metainfo_key)))
except Exception:
self.logger.warn('Error setting energy metainfo.', data=dict(key=key))
# eigenvalues
eigenvalues = get_eigenvalues(section)
# get it from the last scf iteration
if eigenvalues is not None:
sec_eigenvalues = sec_scc.m_create(BandEnergies)
if eigenvalues[0] is not None:
sec_eigenvalues.kpoints = eigenvalues[0]
sec_eigenvalues.energies = eigenvalues[1]
sec_eigenvalues.occupations = eigenvalues[2]
# TODO add force contributions and stress
forces = section.get('forces', None)
if forces is not None:
sec_forces = sec_scc.m_create(Forces)
sec_forces.free = ForcesEntry(value=forces)
forces_raw = section.get('forces_raw', None)
if forces_raw is not None:
# we are actually reading the scf forces so we take only the last iteration
try:
# TODO This is a temporary fix to a huge md run I cannot test.
# see calc_id=a8r8KkvKXWams50UhzMGCxY0IGqH
sec_forces.free.value_raw = forces_raw[-len(forces):] * ureg.eV / ureg.angstrom
except Exception:
self.logger.warn('Error setting raw forces.')
time_calculation = section.get('time_force_evaluation')
if time_calculation is not None:
sec_scc.time_calculation = time_calculation
scf_iterations = section.get('self_consistency', [])
sec_scc.n_scf_iterations = len(scf_iterations)
for scf_iteration in scf_iterations:
parse_scf(scf_iteration)
sec_scc.calculation_converged = section.get(
'converged') == 'converged'
# how about geometry optimization convergence
# density of states
parse_dos(section)
# fermi level
fermi_energy = 0.0
if scf_iterations:
fermi_energy = scf_iterations[-1].get('fermi_level')
fermi_energy = fermi_energy.to('joule').magnitude if fermi_energy else 0.0
sec_scc.energy.fermi = fermi_energy
# gw
parse_gw(section)
# vdW parameters
parse_vdW(section)
if self._electronic_structure_method in ['DFT', 'G0W0', 'scGW']:
sec_method = sec_run.m_create(Method)
sec_scc.method_ref = sec_run.method[-1]
sec_method.electronic = Electronic(method=self._electronic_structure_method)
if self._electronic_structure_method == 'DFT':
sec_method.core_method_ref = sec_run.method[0]
else:
sec_method.gw = GWMethod(type=self._electronic_structure_method)
sec_method.starting_method_ref = sec_run.method[0]
sec_method.methods_ref = [sec_run.method[0]]
for section in self.out_parser.get('full_scf', []):
parse_section(section)
for section in self.out_parser.get('geometry_optimization', []):
parse_section(section)
for section in self.out_parser.get('molecular_dynamics', []):
parse_section(section)
if not sec_run.calculation:
return
# bandstructure
parse_bandstructure()
# sampling method
sec_workflow = self.archive.m_create(Workflow)
sec_workflow.type = self.out_parser.get_calculation_type()
def parse_method(self):
sec_run = self.archive.run[-1]
sec_method = sec_run.m_create(Method)
sec_method.basis_set.append(BasisSet(type='numeric AOs'))
sec_electronic = sec_method.m_create(Electronic)
sec_electronic.method = self._electronic_structure_method
sec_dft = sec_method.m_create(DFT)
# control parameters from out file
self.control_parser.mainfile = self.filepath
# we use species as marker that control parameters are printed in out file
species = self.control_parser.get('species')
# if not in outfile read it from control.in
if species is None:
control_file = self.get_fhiaims_file('control.in')
if not control_file:
control_file = [os.path.join(self.out_parser.maindir, 'control.in')]
self.control_parser.mainfile = control_file[0]
def parse_basis_set(species):
sec_basis_set = sec_method.m_create(x_fhi_aims_section_controlIn_basis_set)
basis_funcs = ['gaussian', 'hydro', 'valence', 'ion_occ', 'ionic', 'confined']
for key, val in species.items():
if key == 'species':
sec_basis_set.x_fhi_aims_controlIn_species_name = val[0]
elif key == 'angular_grids':
sec_basis_set.x_fhi_aims_controlIn_angular_grids_method = val[0]
elif key == 'division':
pass
elif key in basis_funcs:
for i in range(len(val)):
sec_basis_func = sec_basis_set.m_create(
x_fhi_aims_section_controlIn_basis_func)
sec_basis_func.x_fhi_aims_controlIn_basis_func_type = key
sec_basis_func.x_fhi_aims_controlIn_basis_func_n = int(val[i][0])
sec_basis_func.x_fhi_aims_controlIn_basis_func_l = str(val[i][1])
if len(val[i]) == 3 and hasattr(val[i][2], 'real'):
sec_basis_func.x_fhi_aims_controlIn_basis_func_radius = val[i][2]
elif key in ['cut_pot', 'radial_base']:
setattr(sec_basis_set, 'x_fhi_aims_controlIn_%s' % key, np.array(
val[0], dtype=float))
else:
try:
setattr(sec_basis_set, 'x_fhi_aims_controlIn_%s' % key, val[0])
except Exception:
self.logger.warn('Error setting controlIn metainfo.', data=dict(key=key))
# is the number of basis functions equal to number of divisions?
division = species.get('division', None)
if division is not None:
sec_basis_set.x_fhi_aims_controlIn_number_of_basis_func = len(division)
sec_basis_set.x_fhi_aims_controlIn_division = division
for key, val in self.control_parser.items():
if val is None:
# TODO consider also none entries? or (isinstance(val, str) and val == 'none'):
continue
if key.startswith('x_fhi_aims_controlIn'):
try:
if key == 'x_fhi_aims_controlIn_hse_unit':
val = str(val)
setattr(sec_method, key, val)
except Exception:
self.logger.warn('Error setting controlIn metainfo.', data=dict(key=key))
elif key == 'occupation_type':
sec_method.x_fhi_aims_controlIn_occupation_type = val[0]
sec_method.x_fhi_aims_controlIn_occupation_width = val[1]
if len(val) > 2:
sec_method.x_fhi_aims_controlIn_occupation_order = int(val[2])
elif key == 'relativistic':
if isinstance(val, str):
val = [val]
sec_method.x_fhi_aims_controlIn_relativistic = ' '.join(val[:2])
if len(val) > 2:
sec_method.x_fhi_aims_controlIn_relativistic_threshold = val[2]
elif key == 'species':
for species in val:
parse_basis_set(species)
elif key == 'xc':
if isinstance(val, str):
val = [val]
xc = ' '.join([v for v in val if isinstance(v, str)])
sec_method.x_fhi_aims_controlIn_xc = str(xc)
if not isinstance(val[-1], str) and xc.lower().startswith('hse'):
unit = self.control_parser.get('x_fhi_aims_controlIn_hse_unit')
hse_omega = val[-1] * unit if unit else val[-1]
sec_method.x_fhi_aims_controlIn_hse_omega = hse_omega
hybrid_coeff = self.control_parser.get('x_fhi_aims_controlIn_hybrid_xc_coeff')
if hybrid_coeff is not None:
# is it necessary to check if xc is a hybrid type aside from hybrid_coeff
sec_method.x_fhi_aims_controlIn_hybrid_xc_coeff = hybrid_coeff
inout_exclude = [
'x_fhi_aims_controlInOut_relativistic', 'x_fhi_aims_controlInOut_xc',
'x_fhi_aims_controlInOut_hse_unit', 'x_fhi_aims_controlInOut_hybrid_xc_coeff']
# add controlInOut parameters
for key, val in self.out_parser.items():
if key.startswith('x_fhi_aims_controlInOut') and val is not None:
if key not in inout_exclude:
try:
setattr(sec_method, key, val)
except Exception:
self.logger.warn('Error setting controlInOut metainfo.', data=dict(key=key))
nspin = self.out_parser.get_number_of_spin_channels()
sec_method.x_fhi_aims_controlInOut_number_of_spin_channels = nspin
sec_electronic.n_spin_channels = nspin
# convert relativistic
relativistic = self.out_parser.get('x_fhi_aims_controlInOut_relativistic')
if relativistic is not None:
if not isinstance(relativistic, str):
relativistic = ' '.join(relativistic)
sec_method.x_fhi_aims_controlInOut_relativistic = relativistic
relativistic = self._relativity_map.get(relativistic, None)
if relativistic is not None:
sec_electronic.relativity_method = relativistic
# atom species
self.parse_topology()
# xc functional from output
xc_inout = self.out_parser.get('x_fhi_aims_controlInOut_xc', None)
if xc_inout is not None:
xc_inout = [xc_inout] if isinstance(xc_inout, str) else xc_inout
xc = ' '.join([v for v in xc_inout if isinstance(v, str)])
sec_method.x_fhi_aims_controlInOut_xc = str(xc)
# hse func
hse_omega = None
if not isinstance(xc_inout[-1], str) and xc.lower().startswith('hse'):
unit = self.out_parser.get('x_fhi_aims_controlInOut_hse_unit')
hse_omega = xc_inout[-1] * unit if unit else xc_inout[-1]
sec_method.x_fhi_aims_controlInOut_hse_omega = hse_omega
hybrid_coeff = self.out_parser.get('x_fhi_aims_controlInOut_hybrid_xc_coeff')
if hybrid_coeff is not None:
sec_method.x_fhi_aims_controlIn_hybrid_xc_coeff = hybrid_coeff
# convert parsed xc to meta info
xc_meta_list = self._xc_map.get(xc, [])
sec_xc_functional = sec_dft.m_create(XCFunctional)
for xc_meta in xc_meta_list:
name = xc_meta.get('name')
functional = Functional(name=name)
weight = xc_meta.get('weight', None)
if weight is not None and hybrid_coeff is not None:
functional.weight = weight(float(hybrid_coeff))
xc_parameters = dict()
if hse_omega is not None:
hybrid_coeff = 0.25 if hybrid_coeff is None else hybrid_coeff
xc_parameters.setdefault('$\\omega$ in m^-1', hse_omega.to('1/m').magnitude)
if hybrid_coeff is not None:
xc_parameters.setdefault('hybrid coefficient $\\alpha$', hybrid_coeff)
if xc_parameters:
functional.parameters = xc_parameters
if '_X_' in name or name.endswith('_X'):
sec_xc_functional.exchange.append(functional)
elif '_C_' in name or name.endswith('_C'):
sec_xc_functional.correlation.append(functional)
elif 'HYB' in name:
sec_xc_functional.hybrid.append(functional)
else:
sec_xc_functional.contributions.append(functional)
def parse_topology(self):
sec_method = self.archive.run[-1].method[-1]
def parse_atom_type(species):
sec_atom_type = sec_method.m_create(AtomParameters)
sec_atom_species = sec_atom_type.m_create(
x_fhi_aims_section_controlInOut_atom_species)
for key, val in species.items():
if key == 'nuclear charge':
charge = val[0] * ureg.elementary_charge
sec_atom_type.charge = charge
sec_atom_species.x_fhi_aims_controlInOut_species_charge = charge
elif key == 'atomic mass':
mass = val[0][0] * ureg.amu
sec_atom_type.mass = mass
sec_atom_species.x_fhi_aims_controlInOut_species_mass = mass
elif key == 'species':
sec_atom_type.label = val
sec_atom_species.x_fhi_aims_controlInOut_species_name = val
elif 'request to include pure gaussian fns' in key:
sec_atom_species.x_fhi_aims_controlInOut_pure_gaussian = val[0]
elif 'cutoff potl' in key:
sec_atom_species.x_fhi_aims_controlInOut_species_cut_pot = val[0][0] * ureg.angstrom
sec_atom_species.x_fhi_aims_controlInOut_species_cut_pot_width = val[0][1] * ureg.angstrom
sec_atom_species.x_fhi_aims_controlInOut_species_cut_pot_scale = val[0][2]
elif 'free-atom' in key or 'free-ion' in key:
for i in range(len(val)):
sec_basis_func = sec_atom_species.m_create(
x_fhi_aims_section_controlInOut_basis_func)
sec_basis_func.x_fhi_aims_controlInOut_basis_func_type = ' '.join(key.split()[:-1])
sec_basis_func.x_fhi_aims_controlInOut_basis_func_n = val[i][0]
sec_basis_func.x_fhi_aims_controlInOut_basis_func_l = val[i][1]
sec_basis_func.x_fhi_aims_controlInOut_basis_func_occ = val[i][2]
elif 'hydrogenic' in key:
for i in range(len(val)):
sec_basis_func = sec_atom_species.m_create(
x_fhi_aims_section_controlInOut_basis_func)
sec_basis_func.x_fhi_aims_controlInOut_basis_func_type = ' '.join(key.split()[:-1])
sec_basis_func.x_fhi_aims_controlInOut_basis_func_n = val[i][0]
sec_basis_func.x_fhi_aims_controlInOut_basis_func_l = val[i][1]
sec_basis_func.x_fhi_aims_controlInOut_basis_func_eff_charge = val[i][2]
elif 'ionic' in key:
for i in range(len(val)):
sec_basis_func = sec_atom_species.m_create(
x_fhi_aims_section_controlInOut_basis_func)
sec_basis_func.x_fhi_aims_controlInOut_basis_func_type = 'ionic basis'
sec_basis_func.x_fhi_aims_controlInOut_basis_func_n = val[i][0]
sec_basis_func.x_fhi_aims_controlInOut_basis_func_l = val[i][1]
elif 'basis function' in key:
for i in range(len(val)):
sec_basis_func = sec_atom_species.m_create(
x_fhi_aims_section_controlInOut_basis_func)
sec_basis_func.x_fhi_aims_controlInOut_basis_func_type = key.split(
'basis')[0].strip()
if val[i][0] == 'L':
sec_basis_func.x_fhi_aims_controlInOut_basis_func_gauss_l = val[i][2]
sec_basis_func.x_fhi_aims_controlInOut_basis_func_gauss_N = val[i][3]
alpha = [val[i][j + 2] for j in range(len(val[i])) if val[i][j] == 'alpha']
weight = [val[i][j + 2] for j in range(len(val[i])) if val[i][j] == 'weight']
alpha = np.array(alpha) * (1 / ureg.angstrom ** 2)
sec_basis_func.x_fhi_aims_controlInOut_basis_func_gauss_alpha = alpha
sec_basis_func.x_fhi_aims_controlInOut_basis_func_gauss_weight = weight
elif len(val[i]) == 2:
sec_basis_func.x_fhi_aims_controlInOut_basis_func_gauss_l = val[i][0]
alpha = np.array(val[i][1]) / ureg.angstrom ** 2
sec_basis_func.x_fhi_aims_controlInOut_basis_func_primitive_gauss_alpha = alpha
# add inout parameters read from main output
# species
species = self.out_parser.get('control_inout', {}).get('species')
if species is not None:
for specie in species:
parse_atom_type(specie)
def init_parser(self):
self.out_parser.mainfile = self.filepath
self.out_parser.logger = self.logger
self.control_parser.logger = self.logger
self.dos_parser.logger = self.logger
self.bandstructure_parser.logger = self.logger
def reuse_parser(self, parser):
self.out_parser.quantities = parser.out_parser.quantities
self.control_parser.quantities = parser.control_parser.quantities
def parse(self, filepath, archive, logger):
self.filepath = os.path.abspath(filepath)
self.archive = archive
self.maindir = os.path.dirname(self.filepath)
self.logger = logger if logger is not None else logging
self._electronic_structure_method = 'DFT'
self.init_parser()
sec_run = self.archive.m_create(Run)
sec_run.program = Program(
name='FHI-aims', version=self.out_parser.get('version', ''),
compilation_host=self.out_parser.get('compilation_host', ''))
sec_run.time_run = TimeRun(
cpu1_start=self.out_parser.get('cpu1_start', 0),
wall_start=self.out_parser.get('wall_start', 0))
section_run_keys = [
'x_fhi_aims_program_compilation_date', 'x_fhi_aims_program_compilation_time',
'x_fhi_aims_program_execution_date', 'x_fhi_aims_program_execution_time',
'raw_id', 'x_fhi_aims_number_of_tasks']
for key in section_run_keys:
value = self.out_parser.get(key)
if value is None:
continue
try:
setattr(sec_run, key, value)
except Exception:
self.logger.warn('Error setting run metainfo', data=dict(key=key))
sec_parallel_tasks = sec_run.m_create(x_fhi_aims_section_parallel_tasks)
# why embed section not just let task be an array
task_nrs = self.out_parser.get('x_fhi_aims_parallel_task_nr', [])
task_hosts = self.out_parser.get('x_fhi_aims_parallel_task_host', [])
for i in range(len(task_nrs)):
sec_parallel_task_assignement = sec_parallel_tasks.m_create(
x_fhi_aims_section_parallel_task_assignement)
sec_parallel_task_assignement.x_fhi_aims_parallel_task_nr = task_nrs[i]
sec_parallel_task_assignement.x_fhi_aims_parallel_task_host = task_hosts[i]
self.parse_method()
self.parse_configurations()
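# A minimal usage sketch (hypothetical; the actual entry-point wiring depends on the
# NOMAD infrastructure and is not shown in this file). It assumes nomad.datamodel
# provides EntryArchive, and 'path/to/aims.out' is a placeholder:
#
#     from nomad.datamodel import EntryArchive
#
#     archive = EntryArchive()
#     FHIAimsParser().parse('path/to/aims.out', archive, logging.getLogger(__name__))
#     # parsed data is then available under archive.run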
|