content stringlengths 5 1.05M |
|---|
# encoding: utf-8
'''Cache-related utilities.
'''
from types import *
import sys, os
__all__ = ['callable_cache_key', 'app_shared_key']
def callable_cache_key(node):
    '''Calculate a key unique enough to be used for caching callables.

    * plain functions are used directly as their own key;
    * bound methods hash in the class of the bound instance, so methods of
      different classes do not collide;
    * other callables (instances with ``__call__``) combine the hash of the
      bound ``__call__`` with the hash of the object itself.
    '''
    if not isinstance(node, (MethodType, FunctionType)):
        return hash(node.__call__) ^ hash(node)
    elif isinstance(node, MethodType):
        # Python 3 replacement for the Python 2 `node.im_class` attribute:
        # the class of the instance the method is bound to.
        return hash(node) ^ hash(node.__self__.__class__)
    return node
def app_shared_key():
    '''Return a key identifying the running application.

    The key combines the basename of the ``__main__`` script (or its package
    directory, for ``__init__`` files) with a hex digest of the script path's
    hash, prefixed ``a``/``b`` to encode the hash's sign.
    '''
    script = sys.modules['__main__'].__file__
    digest = hash(script)
    suffix = ('a%lx' % -digest) if digest < 0 else ('b%lx' % digest)
    base = os.path.splitext(os.path.basename(script))[0]
    if base == '__init__':
        # Package entry point: use the package directory's name instead.
        base = os.path.basename(os.path.dirname(os.path.abspath(script)))
    return '%s_%s' % (base, suffix)
|
import abc
import dataclasses
import logging
from datetime import timedelta
from typing import Dict, List, Optional, Union
import confluent_kafka # type: ignore
from .auth import SASLAuth
from .errors import (DeliveryCallback, ErrorCallback, log_client_errors,
log_delivery_errors)
class Producer:
    """Thin wrapper around ``confluent_kafka.Producer`` writing to one topic.

    All connection settings (brokers, topic, auth, callbacks) come from the
    :class:`ProducerConfig` supplied at construction time.
    """
    conf: 'ProducerConfig'
    _producer: confluent_kafka.Producer
    logger: logging.Logger

    def __init__(self, conf: 'ProducerConfig') -> None:
        self.logger = logging.getLogger("adc-streaming.producer")
        self.conf = conf
        self.logger.debug(f"connecting to producer with config {conf._to_confluent_kafka()}")
        self._producer = confluent_kafka.Producer(conf._to_confluent_kafka())

    def write(self,
              msg: Union[bytes, 'Serializable'],
              headers: Optional[Union[dict, list]] = None,
              delivery_callback: Optional[DeliveryCallback] = log_delivery_errors) -> None:
        """Enqueue ``msg`` for delivery to the configured topic.

        Serializable messages are converted to bytes first. Delivery is
        asynchronous; ``delivery_callback`` (if not None) is invoked with the
        per-message delivery result.
        """
        if isinstance(msg, Serializable):
            msg = msg.serialize()
        self.logger.debug("writing message to %s", self.conf.topic)
        if delivery_callback is not None:
            self._producer.produce(self.conf.topic, msg, headers=headers,
                                   on_delivery=delivery_callback)
        else:
            self._producer.produce(self.conf.topic, msg, headers=headers)

    def flush(self, timeout: timedelta = timedelta(seconds=10)) -> int:
        """Attempt to flush enqueued messages. Return the number of messages still
        enqueued after the attempt.
        """
        n = self._producer.flush(timeout.total_seconds())
        if n > 0:
            self.logger.debug("flushed messages, %d still enqueued", n)
        else:
            self.logger.debug("flushed all messages")
        return n

    def close(self) -> int:
        """Flush outstanding messages and return how many remain unsent."""
        self.logger.debug("shutting down producer")
        return self.flush()

    def __enter__(self) -> 'Producer':
        return self

    def __exit__(self, exc_type, value, traceback) -> bool:
        # FIX: the original compared `exc_type == KeyboardInterrupt`, which
        # misses subclasses; issubclass handles them too.
        if exc_type is not None and issubclass(exc_type, KeyboardInterrupt):
            print("Aborted (CTRL-C).")
            return True  # suppress the interrupt; treated as a clean abort
        if exc_type is None and value is None and traceback is None:
            # Normal exit: make sure everything was actually delivered.
            n_unsent = self.close()
            if n_unsent > 0:
                raise Exception(f"{n_unsent} messages remain unsent, some data may have been lost!")
            return False
        # Any other exception propagates unchanged.
        return False
@dataclasses.dataclass
class ProducerConfig:
    """Settings used to build a :class:`Producer`'s underlying Kafka client."""
    broker_urls: List[str]
    topic: str
    auth: Optional[SASLAuth] = None
    error_callback: Optional[ErrorCallback] = log_client_errors
    # Maximum time the backend may take to send a message to Kafka; a value
    # of 0 disables the timeout entirely.
    produce_timeout: timedelta = timedelta(seconds=10)
    # Delay before retrying a failed send; must be at least one millisecond.
    produce_backoff_time: timedelta = timedelta(milliseconds=100)
    # Whether the broker should record messages exactly once and in the
    # order they were produced.
    use_idempotence: bool = False
    # Initial wait before reconnecting after a connection failure. Repeated
    # failures grow the wait exponentially (with random jitter) up to
    # reconnect_max_time.
    reconnect_backoff_time: timedelta = timedelta(milliseconds=100)
    # Upper bound on the reconnect backoff described above.
    reconnect_max_time: timedelta = timedelta(seconds=10)

    def _to_confluent_kafka(self) -> Dict:
        """Translate these settings into a confluent-kafka config dict."""
        def _millis(delta: timedelta):
            """Convert a timedelta to an integral number of milliseconds."""
            return int(delta.total_seconds() * 1000.0)

        if self.produce_backoff_time < timedelta(milliseconds=1):
            raise ValueError("produce_backoff_time may not be less than one millisecond")
        translated = {
            "bootstrap.servers": ",".join(self.broker_urls),
            "enable.idempotence": self.use_idempotence,
            "message.timeout.ms": _millis(self.produce_timeout),
            "reconnect.backoff.max.ms": _millis(self.reconnect_max_time),
            "reconnect.backoff.ms": _millis(self.reconnect_backoff_time),
            "retry.backoff.ms": _millis(self.produce_backoff_time),
        }
        if self.error_callback is not None:
            translated["error_cb"] = self.error_callback
        if self.auth is not None:
            translated.update(self.auth())
        return translated
class Serializable(abc.ABC):
    """Interface for message objects that can serialize themselves to bytes.

    FIX: ``serialize`` is now marked ``@abc.abstractmethod``. The original
    class derived from ``abc.ABC`` but declared no abstract method, so the
    "abstract" base could be instantiated; now instantiation of a subclass
    that does not implement ``serialize`` fails early with a TypeError.
    """
    @abc.abstractmethod
    def serialize(self) -> bytes:
        """Return the message payload as raw bytes."""
        raise NotImplementedError()
|
'''
Function Name   : ProcessDisplay
Description     : This application generates a log file every 5 minutes and stores all log files in the Running_Process folder.
Function Date   : 19 July 2021
Function Author : Prasad Dangare
Input           : Gets the directory name.
Output          : Creates one folder which contains all log files of the currently running processes.
'''
# ===================
#
# Imports
#
# ===================
import os
import time
import psutil
from sys import *
import schedule
# ==============================
#
# Running Process Creation Operation
#
# ==============================
def ProcessDisplay(FolderName = "Running_Process"):
    """Write a timestamped log file listing all running processes.

    Creates ``FolderName`` if needed, then writes one log file named after
    the current ``time.ctime()`` (spaces and colons stripped so the name is
    filesystem-safe) containing pid/name/username for every process.
    """
    # exist_ok avoids the race between the exists() check and mkdir().
    os.makedirs(FolderName, exist_ok=True)
    # ctime() gives day, time and year; strip separators for a valid filename.
    File_Path = os.path.join(FolderName, "Running_Process%s.log" % time.ctime())
    File_Path = File_Path.replace(" ", "").replace(":", "")
    # FIX: the original never closed the file handle; `with` guarantees it.
    with open(File_Path, "w") as fd:
        for proc in psutil.process_iter():
            value = proc.as_dict(attrs=['pid', 'name', 'username'])
            fd.write("%s\n" % value)
# =======================
#
# Entry Point
#
# =======================
def main():
    """Entry point: parse CLI flags and schedule periodic process logging.

    Usage: Application_Name Schedule_Time [Directory_Name]; ``-u``/``-h``
    print usage/help and exit.
    """
    print("\n")
    print("------ Python Automation ------")
    print("---- Running Process Identifier ----")
    print("\n")
    print("Script title : "+argv[0])
    print("Folder And Log File Create After One Minute And To Close This Application Press ctrl with c")
    # FIX: guard against missing arguments before indexing argv.
    if len(argv) < 2:
        print("Usage : Application_Name Scheule_Time Directory_Name")
        exit()
    if(argv[1] == "-u") or (argv[1] == "-U"):
        print("Usage : Application_Name Scheule_Time Directory_Name")
        exit()
    if(argv[1] == "-h") or (argv[1] == "-H"):
        print("Help : It is used to create log file of running processess")
        exit()
    # FIX: the schedule interval is argv[1] per the usage string; the
    # original indexed argv[5], which is never supplied.
    schedule.every(int(argv[1])).minutes.do(ProcessDisplay)
    # Keep servicing scheduled jobs until the user interrupts with Ctrl+C.
    while True:
        schedule.run_pending()
        time.sleep(1)
# =======================
#
# Code Starter
#
# =======================
# Invoke the entry point only when executed as a script.
if __name__ == "__main__":
    main()
|
import paramiko
import getpass
from time import sleep
username = raw_input("Enter your username: ")
#Remove below sharp sign to hard code your password
#password = "YOUR PASSWORD HERE"
#use below line to ask for password and echo it to user
#password = raw_input("Enter your password: ")
#use below line to ask for password without echoing it to user
password = getpass.getpass("Enter your password: ")
###########define servers
#use a text file containing your IP addresses (one IP per line)
f = open('.\\IP_List.txt')
for line in f:
line = line.strip()
print "connecting to " + (line)
ip_address = line
#############################
#########SSH client##########
#############################
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_client.connect(hostname=line, username=username, password=password)
print "Successfully connected to: ", line
sleep(0.5)
#############################
###########COMMANDS##########
#############################
remote_connection = ssh_client.invoke_shell()
sleep(0.1)
#remote_connection.send("ls -h /etc/cron.d/ | cat > list.txt && cat list.txt\n")
sleep(0.5)
#IF YOU NEED TO SWITCH TO ROOT USER USE BELOW LINES
remote_connection.send("sudo su\n")
sleep(0.1)
remote_connection.send("PASSWORD\n")
sleep(0.5)
remote_connection.send("echo '#!/bin/bash' > script.sh\n")
remote_connection.send("echo 'echo \"###########HOSTNAME IS:#############\"' >> script.sh\n")
remote_connection.send("echo 'printenv | grep HOSTNAME=' >> script.sh\n")
remote_connection.send("echo 'for cronfile in /etc/cron.d/*' >> script.sh\n")
remote_connection.send("echo do >> script.sh\n")
#remote_connection.send("echo 'echo \"########FILE NAME IS#########\"' >> script.sh\n")
#remote_connection.send("echo 'echo \"$cronfile\"' >> script.sh\n")
remote_connection.send("echo 'echo \"########The FILE Contains:#########\"' >> script.sh\n")
remote_connection.send("echo cat '$cronfile' >> script.sh\n")
remote_connection.send("echo done >> script.sh && chmod 777 script.sh\n")
sleep(0.1)
remote_connection.send("./script.sh > cron-files.txt\n")
sleep(0.1)
remote_connection.send("cat cron-files.txt\n")
remote_connection.send("mkdir /mnt/smb\n")
remote_connection.send("mount //WINDOWS FILE SHARE/PATH /mnt/smb -o user=YOUR USERNAME \n")
sleep(0.1)
#SEND THE PASSWORD OF WINDOWS SHARE USING BELOW LINE
remote_connection.send("PASSWORD OF WINDOWS SHARE\n")
sleep(0.2)
remote_connection.send("cp cron-files.txt/mnt/smb/$HOSTNAME-Cron-Summary.txt\n")
sleep(0.1)
remote_connection.send("umount /mnt/smb\n")
remote_connection.send("rm -rf script.sh\n")
remote_connection.send("rm -rf cron-files.txt\n")
remote_connection.send("rm -rf /mnt/smb\n")
#remote_connection.send("rm -rf cron-files.txt\n")
###########print output
sleep(0.1)
output = remote_connection.recv(65535)
print output
###########closing session
ssh_client.close()
f.close()
exit()
|
import os, _cbstools
__dir__ = os.path.abspath(os.path.dirname(__file__))
class JavaError(Exception):
    """Wraps an exception raised on the Java side of the _cbstools bridge.

    The original Java exception object is stored as the first positional
    argument of this exception.
    """
    def getJavaException(self):
        # The Java exception object is passed as the first constructor arg.
        return self.args[0]
    def __str__(self):
        # NOTE(review): StringWriter, PrintWriter and `unicode` are not
        # defined in this module; presumably they are injected by the
        # wildcard `from _cbstools import *` at the bottom of the file, and
        # `unicode` implies Python 2 — confirm before running on Python 3.
        writer = StringWriter()
        self.getJavaException().printStackTrace(PrintWriter(writer))
        return u"\n".join((unicode(super(JavaError, self)), u"    Java stacktrace:", unicode(writer)))
class InvalidArgsError(Exception):
    """Raised when invalid arguments are passed to a wrapped native call."""
    pass
# Register the exception types with the C extension so it can raise them.
_cbstools._set_exception_types(JavaError, InvalidArgsError)
# Java classpath for the bundled jars, joined with the platform separator.
CLASSPATH = [os.path.join(__dir__, "cbstools.jar"), os.path.join(__dir__, "cbstools-lib.jar"), os.path.join(__dir__, "commons-math3-3.5.jar"), os.path.join(__dir__, "Jama-mipav.jar")]
CLASSPATH = os.pathsep.join(CLASSPATH)
_cbstools.CLASSPATH = CLASSPATH
# Bind initVM to the extension module, then re-export the generated API.
_cbstools._set_function_self(_cbstools.initVM, _cbstools)
from _cbstools import *
|
# -*- coding: utf-8 -*-
#
# setup.py
#
# Author: Michael E. Tryby
# US EPA - ORD/NRMRL
#
''' Setup up script for nrtest_swmm package. '''
# Prefer setuptools; fall back to distutils on minimal installations.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# nrtest discovers comparison functions through these entry points.
entry_points = {
    'nrtest.compare': [
        'swmm allclose = nrtest_swmm:swmm_allclose_compare',
        'swmm report = nrtest_swmm:swmm_report_compare',
        # Add the entry point for new comparison functions here
    ]
}

setup(
    name='nrtest-swmm',
    version='0.4.0',
    description="SWMM extension for nrtest",
    author="Michael E. Tryby",
    author_email='tryby.michael@epa.gov',
    url='https://github.com/USEPA',
    packages=['nrtest_swmm',],
    entry_points=entry_points,
    install_requires=[
        'header_detail_footer>=2.3',
        'nrtest>=0.2.0',
        'numpy>=1.7.0',
        'swmm_output',
    ],
    keywords='nrtest_swmm'
)
|
class LossScaler(object):
    r"""Base class for implementing custom loss scaler strategies

    Once the scaler is configured, no user intervention is needed to update
    loss scale during training.

    Note:
        This class should never be instantiated, but used as an abstract class
        for custom loss scaling strategy.
    """
    def __init__(self, loss_scale):
        assert isinstance(loss_scale, (int, float)) and loss_scale > 0, "'loss_scale' must be a positive float"
        self._input_name = None
        self._loss_scale = float(loss_scale)
        # Remembered so reset() can restore the starting scale.
        self._initial_loss_scale = float(loss_scale)

    @property
    def input_name(self):
        return self._input_name

    @input_name.setter
    def input_name(self, input_name):
        # FIX: the original asserted isinstance(input_name, str) first, which
        # made the documented `None` (unset) case unreachable. Allow None, or
        # a non-empty string.
        assert input_name is None or isinstance(input_name, str), "'input_name' must be a string"
        assert input_name is None or len(input_name) > 0, "'input_name' cannot be empty"
        self._input_name = input_name

    @property
    def loss_scale(self):
        return self._loss_scale

    @loss_scale.setter
    def loss_scale(self, loss_scale):
        assert isinstance(loss_scale, (int, float)) and loss_scale > 0, "'loss_scale' must be a positive float"
        self._loss_scale = float(loss_scale)

    def reset(self):
        r"""Resets loss scaler internal state"""
        self._loss_scale = self._initial_loss_scale

    def update(self, train_step_info):
        r"""Updates loss based on user input and training session info

        Args:
            train_step_info (TrainStepInfo): last step state information

        Returns:
            Updated loss scale (float)
        """
        raise NotImplementedError
class DynamicLossScaler(LossScaler):
    r"""Default implementation for :py:class:`.LossScaler` class used for mixed precision

    This loss scaler works by assuming an initial scale, which is doubled every time a certain number of
    (stable) training steps are performed without exploding gradients (overflow or reach infinity).
    When at least one of the gradients explode, loss scale is divided by 2.

    Users can use this class in two ways:

    1. Enable mixed precision and not setting a loss scaler class. Default values are used
    2. Enable mixed precision and instantiate this class to override default arguments

    Static loss scaling can be achieved by setting :py:attr:`.automatic_update` to :py:obj:`False`
    and not performing manual :py:meth:`update` in train loop.

    Args:
        automatic_update (bool, default is True): boolean switch that allows :py:meth:`ORTTrainer.train_step`
            to automatically perform loss scaling. If False, an explicit call to :py:meth:`.update` must be done by the user,
            otherwise static loss scaling is performed.
        loss_scale (default is 1 << 16): A float that represents current loss scale
        up_scale_window (int, default is 2000): number of stable train steps before doubling loss scale
        min_loss_scale (float, default is 1): min value for the loss scale. Used when loss scale is decreased
        max_loss_scale (float, default is 1 << 24): max value for the loss scale. Used when loss scale is increased

    Example with default values:
        .. code-block:: python

            scaler1 = amp.DynamicLossScaler()
            print(f'Default loss scale is {scaler1.loss_scale}')

    Example with user specified values:
        .. code-block:: python

            scaler2 = amp.DynamicLossScaler(loss_scale=1<<8)
            print(f'Custom loss scale is {scaler2.loss_scale}')
    """
    # NOTE: docstring fixed — `automatic_update` defaults to True in code,
    # but the original docstring claimed the default was False.
    def __init__(self, automatic_update=True,
                 loss_scale=float(1 << 16),
                 up_scale_window=2000,
                 min_loss_scale=1.0,
                 max_loss_scale=float(1 << 24)):
        super().__init__(loss_scale)
        self.automatic_update = automatic_update
        self.up_scale_window = up_scale_window
        self.min_loss_scale = min_loss_scale
        self.max_loss_scale = max_loss_scale
        # Counts consecutive all-finite steps since the last scale change.
        self._stable_steps_count = 0

    def reset(self):
        r"""Restore the initial loss scale and clear the stable-step counter."""
        super().reset()
        self._stable_steps_count = 0

    def update(self, train_step_info):
        r"""Double the scale after `up_scale_window` stable steps; halve it on overflow.

        Returns the (possibly updated) loss scale; no-op when
        :py:attr:`automatic_update` is False.
        """
        if not self.automatic_update:
            return self.loss_scale
        if train_step_info.all_finite:
            self._stable_steps_count += 1
            if self._stable_steps_count >= self.up_scale_window:
                self.loss_scale = min(self.max_loss_scale, self.loss_scale * 2)
                self._stable_steps_count = 0
        else:
            # Gradients exploded: back off, bounded below by min_loss_scale.
            self.loss_scale = max(self.min_loss_scale, self.loss_scale / 2)
            self._stable_steps_count = 0
        return self.loss_scale
|
import numpy as np
import scipy as sp
from scipy.stats import norm
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
import matplotlib.pyplot as plt
from scipy import sparse
# 机器学习中的预测问题通常分为2类:回归与分类。
# 简单的说回归就是预测数值,而分类是给数据打上标签归类。
def showplt():
    """Scatter-plot noisy samples of y = x**2 on [0, 1)."""
    # 500 evenly spaced sample positions with step 0.002.
    xs = np.arange(0, 1, 0.002)
    # Gaussian noise: 500 draws, mean 0, standard deviation 0.1
    # (scipy.stats.norm.rvs(size, loc, scale)).
    noise = norm.rvs(loc=0, size=500, scale=0.1)
    ys = noise + xs**2
    plt.scatter(xs, ys)
    plt.show()
# 均方误差根
def rmse(y_test, y):
    """Root-mean-square error between predictions `y_test` and targets `y`.

    FIX: uses ``np.sqrt`` — ``scipy.sqrt`` (the NumPy alias ``sp.sqrt``)
    was deprecated and removed from SciPy's top-level namespace, so the
    original raised AttributeError on modern SciPy.
    """
    return np.sqrt(np.mean((y_test - y)**2))
# R-平方 与均值相比的优秀程度,介于[0~1]。0表示不如均值。1表示完美预测.这个版本的实现是参考scikit-learn官网文档
def R2(y_test, y_true):
    """Coefficient of determination, measured against the mean predictor.

    1.0 is a perfect prediction; 0.0 means no better than predicting the
    mean of `y_true` (scikit-learn definition).
    """
    residual_ss = ((y_test - y_true)**2).sum()
    baseline_ss = ((y_true - y_true.mean())**2).sum()
    return 1 - residual_ss / baseline_ss
# 这是Conway&White《机器学习使用案例解析》里的版本
def R22(y_test, y_true):
    """R-squared variant from Conway & White, based on RMSE ratios."""
    mean_predictions = np.array(y_true)
    mean_predictions[:] = mean_predictions.mean()
    rmse_ratio = rmse(y_test, y_true) / rmse(mean_predictions, y_true)
    return 1 - rmse_ratio
def main():
    """
    Linear regression and overfitting demo: fit polynomials of degree 1, 2
    and 100 to noisy samples of y = x**2 and compare their fits.
    """
    # FIX: `x` and `y` were undefined here (NameError at runtime); build the
    # same noisy quadratic dataset that showplt() uses.
    x = np.arange(0, 1, 0.002)
    y = norm.rvs(loc=0, size=500, scale=0.1) + x**2
    plt.scatter(x, y, s=5)
    degree = [1, 2, 100]
    for d in degree:
        # PolynomialFeatures expands the input into polynomial terms of
        # degree d before the linear fit.
        clf = Pipeline([('poly', PolynomialFeatures(degree=d)),
                        ('linear', linear_model.LinearRegression(fit_intercept=False))])
        # np.newaxis turns x into an (n, 1) feature matrix for sklearn.
        clf.fit(x[:, np.newaxis], y)
        y_test = clf.predict(x[:, np.newaxis])
        print(clf.named_steps['linear'].coef_)
        print('rmse=%.2f, R2=%.2f, R22=%.2f, clf.score=%.2f' %
              (rmse(y_test, y), R2(y_test, y), R22(y_test, y),
               clf.score(x[:, np.newaxis], y)))
        plt.plot(x, y_test, linewidth=2)
    plt.grid()
    plt.legend(['1', '2', '100'], loc='upper left')
    plt.show()
# 线性回归
def liner_simple():
    """Fit a minimal two-feature linear regression and print its coefficients."""
    print("线性回归")
    samples = [[0, 0], [1, 1], [2, 2]]
    targets = [0, 1, 2]
    reg = linear_model.LinearRegression(
        copy_X=True, fit_intercept=True, n_jobs=1, normalize=False)
    reg.fit(samples, targets)
    print(reg.coef_)
def example():
    """Fit ordinary least squares on one diabetes feature and plot the fit."""
    diabetes = datasets.load_diabetes()
    # Keep a single feature (column 2) as an (n, 1) matrix.
    data = diabetes.data[:, np.newaxis, 2]
    print(data)
    # Hold out the last 20 samples for testing.
    X_train = data[:-20]
    X_test = data[-20:]
    y_train = diabetes.target[:-20]
    y_test = diabetes.target[-20:]
    regr = linear_model.LinearRegression()
    regr.fit(X_train, y_train)
    y_pred = regr.predict(X_test)
    print('Coefficients: ', regr.coef_)
    print("mse: %.2f" % mean_squared_error(y_test, y_pred))
    # Explained variance score: 1 is perfect prediction
    print('score: %.2f' % r2_score(y_test, y_pred))
    # Plot outputs
    show_scatter(X_test,y_test,y_pred)
def ridge_reg():
    """
    Ridge regression on one diabetes feature: fit, print the coefficients
    and intercept, and plot predictions for the held-out samples.
    """
    diabetes = datasets.load_diabetes()
    # Single feature (column 2) as an (n, 1) matrix.
    data = diabetes.data[:, np.newaxis, 2]
    X_train = data[:-20]
    X_test = data[-20:]
    y_train = diabetes.target[:-20]
    y_test = diabetes.target[-20:]
    # alpha is the L2 regularization strength.
    reg = linear_model.Ridge(alpha=0.5, copy_X=True, fit_intercept=True,
        max_iter=None, normalize=False, random_state=None, solver="auto", tol=1e-3)
    reg.fit(X_train, y_train)
    print("系数:", reg.coef_)
    print("系数:", reg.intercept_)
    y_pred = reg.predict(X_test)
    show_scatter(X_test,y_test,y_pred)
def lasso_reg():
    """
    Lasso regression: a linear model that estimates sparse coefficients.
    It is useful when solutions with fewer non-zero parameters are
    preferred, effectively reducing the number of variables the solution
    depends on; Lasso and its variants are fundamental to compressed
    sensing.
    """
    diabetes = datasets.load_diabetes()
    # Single feature (column 2) as an (n, 1) matrix.
    data = diabetes.data[:, np.newaxis, 2]
    X_train = data[:-20]
    X_test = data[-20:]
    y_train = diabetes.target[:-20]
    y_test = diabetes.target[-20:]
    # alpha is the L1 regularization strength.
    reg = linear_model.Lasso(alpha=0.5,fit_intercept=True,normalize=True,precompute=False,copy_X=True,max_iter=1000,tol=1e-4)
    reg.fit(X_train, y_train)
    print("系数:", reg.coef_)
    print("系数:", reg.intercept_)
    y_pred = reg.predict(X_test)
    show_scatter(X_test,y_test,y_pred)
def loss_example():
    """Tomography reconstruction demo comparing L2 (Ridge) and L1 (Lasso)
    penalization on a synthetic binary image.
    """
    # FIX: `ndimage` was used below but never imported (NameError).
    from scipy import ndimage

    def _weights(x, dx=1, orig=0):
        # Linear interpolation weights of each coordinate between its two
        # neighbouring grid lines.
        x = np.ravel(x)
        floor_x = np.floor((x - orig) / dx)
        alpha = (x - orig - floor_x * dx) / dx
        return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))

    def _generate_center_coordinates(l_x):
        # Pixel-center coordinates, shifted so the image center is at (0, 0).
        X, Y = np.mgrid[:l_x, :l_x].astype(np.float64)
        center = l_x / 2.
        X += 0.5 - center
        Y += 0.5 - center
        return X, Y

    def build_projection_operator(l_x, n_dir):
        # Sparse tomography projection operator for n_dir angles.
        X, Y = _generate_center_coordinates(l_x)
        angles = np.linspace(0, np.pi, n_dir, endpoint=False)
        data_inds, weights, camera_inds = [], [], []
        data_unravel_indices = np.arange(l_x ** 2)
        data_unravel_indices = np.hstack((data_unravel_indices,
                                          data_unravel_indices))
        for i, angle in enumerate(angles):
            Xrot = np.cos(angle) * X - np.sin(angle) * Y
            inds, w = _weights(Xrot, dx=1, orig=X.min())
            mask = np.logical_and(inds >= 0, inds < l_x)
            weights += list(w[mask])
            camera_inds += list(inds[mask] + i * l_x)
            data_inds += list(data_unravel_indices[mask])
        proj_operator = sp.sparse.coo_matrix((weights, (camera_inds, data_inds)))
        return proj_operator

    def generate_synthetic_data():
        """ Synthetic binary data """
        rs = np.random.RandomState(0)
        n_pts = 36
        x, y = np.ogrid[0:l, 0:l]
        mask_outer = (x - l / 2.) ** 2 + (y - l / 2.) ** 2 < (l / 2.) ** 2
        mask = np.zeros((l, l))
        points = l * rs.rand(2, n_pts)
        # FIX: np.int was removed in NumPy 1.24; the builtin int is the
        # documented replacement.
        mask[(points[0]).astype(int), (points[1]).astype(int)] = 1
        mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)
        res = np.logical_and(mask > mask.mean(), mask_outer)
        return np.logical_xor(res, ndimage.binary_erosion(res))

    l = 128
    # FIX: np.linspace requires an integer sample count; l / 7. is a float.
    proj_operator = build_projection_operator(l, l // 7)
    data = generate_synthetic_data()
    proj = proj_operator * data.ravel()[:, np.newaxis]
    proj += 0.15 * np.random.randn(*proj.shape)
    # L2-penalized reconstruction.
    rgr_ridge = linear_model.Ridge(alpha=0.2)
    rgr_ridge.fit(proj_operator, proj.ravel())
    rec_l2 = rgr_ridge.coef_.reshape(l, l)
    # L1-penalized (sparse) reconstruction.
    rgr_lasso = linear_model.Lasso(alpha=0.001)
    rgr_lasso.fit(proj_operator, proj.ravel())
    rec_l1 = rgr_lasso.coef_.reshape(l, l)
    plt.figure(figsize=(8, 3.3))
    plt.subplot(131)
    plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
    plt.axis('off')
    plt.title('original image')
    plt.subplot(132)
    plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
    plt.title('L2 penalization')
    plt.axis('off')
    plt.subplot(133)
    plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
    plt.title('L1 penalization')
    plt.axis('off')
    plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0, right=1)
    plt.show()
def multiTaskLasso_reg():
    """
    Multi-task Lasso: a linear model for multi-output regression with
    sparse coefficients. Here y is a 2-D array of shape
    (n_samples, n_tasks); the constraint is that the same features are
    selected across all tasks.
    """
    rng = np.random.RandomState(42)
    n_samples, n_features, n_tasks = 100, 30, 40
    n_relevant_features = 5
    # Only the first n_relevant_features columns carry signal; each varies
    # sinusoidally across tasks with a random frequency and phase.
    coef = np.zeros((n_tasks, n_features))
    times = np.linspace(0, 2 * np.pi, n_tasks)
    for k in range(n_relevant_features):
        coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
    X = rng.randn(n_samples, n_features)
    Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
    # Per-task independent Lasso versus one joint MultiTaskLasso fit.
    coef_lasso_ = np.array([linear_model.Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
    coef_multi_task_lasso_ =linear_model.MultiTaskLasso(alpha=1.).fit(X, Y).coef_
    # Sparsity patterns: spy plots of the non-zero coefficient locations.
    fig = plt.figure(figsize=(8, 5))
    plt.subplot(1, 2, 1)
    plt.spy(coef_lasso_)
    plt.xlabel('Feature')
    plt.ylabel('Time (or Task)')
    plt.text(10, 5, 'Lasso')
    plt.subplot(1, 2, 2)
    plt.spy(coef_multi_task_lasso_)
    plt.xlabel('Feature')
    plt.ylabel('Time (or Task)')
    plt.text(10, 5, 'MultiTaskLasso')
    fig.suptitle('Coefficient non-zero location')
    # Compare the recovered coefficient series for one feature.
    feature_to_plot = 0
    plt.figure()
    lw = 2
    plt.plot(coef[:, feature_to_plot], color='seagreen', linewidth=lw,label='Truth Value')
    plt.plot(coef_lasso_[:, feature_to_plot], color='cornflowerblue', linewidth=lw,label='Lasso')
    plt.plot(coef_multi_task_lasso_[:, feature_to_plot], color='gold', linewidth=lw,label='MultiTaskLasso')
    plt.legend(loc='upper center')
    plt.axis('tight')
    plt.ylim([-1.1, 1.1])
    plt.show()
def show_scatter(X_test,y_test,y_pred):
    """Scatter the true targets and overlay the predicted line, no ticks."""
    # Plot outputs
    plt.scatter(X_test, y_test, color='black')
    plt.plot(X_test, y_pred, color='blue', linewidth=3)
    # Hide axis tick marks for a cleaner demo figure.
    plt.xticks(())
    plt.yticks(())
    plt.show()
def diabetes_reg():
    """Linear regression on one diabetes feature; print and plot predictions."""
    diabetes = datasets.load_diabetes()
    # Single feature (column 2) as an (n, 1) matrix.
    diabetes_X = diabetes.data[:, np.newaxis, 2]
    # Hold out the last 20 samples for testing.
    diabetes_X_train = diabetes_X[:-20]
    diabetes_X_test = diabetes_X[-20:]
    diabetes_y_train = diabetes.target[:-20]
    diabetes_y_test = diabetes.target[-20:]
    regr = linear_model.LinearRegression()
    regr.fit(diabetes_X_train, diabetes_y_train)
    print('Input Values')
    print(diabetes_X_test)
    diabetes_y_pred = regr.predict(diabetes_X_test)
    print("Predicted Output Values")
    print(diabetes_y_pred)
    plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
    plt.plot(diabetes_X_test, diabetes_y_pred, color='red', linewidth=1)
    plt.show()
if __name__ == '__main__':
    # Uncomment any of the demos below to run it instead of diabetes_reg().
    # main()
    # run()
    # liner_simple()
    # example()
    # ridge_reg()
    # lasso_reg()
    # loss_example()
    # multiTaskLasso_reg()
    diabetes_reg()
|
import unittest
from main import Sudoku
class TestSudoku(unittest.TestCase):
    """Unit tests for the Sudoku solver defined in main.py.

    Two fixture puzzles are built in setUp; 0 marks an empty cell.
    `isPossible(row, col, value)` is checked for both accepting and
    rejecting cases, and `solve()` is checked against the known full
    solutions.
    """
    def setUp(self):
        # First fixture puzzle.
        self.sudoku = Sudoku([
            [0, 0, 0, 2, 6, 0, 7, 0, 1],
            [6, 8, 0, 0, 7, 0, 0, 9, 0],
            [1, 9, 0, 0, 0, 4, 5, 0, 0],
            [8, 2, 0, 1, 0, 0, 0, 4, 0],
            [0, 0, 4, 6, 0, 2, 9, 0, 0],
            [0, 5, 0, 0, 0, 3, 0, 2, 8],
            [0, 0, 9, 3, 0, 0, 0, 7, 4],
            [0, 4, 0, 0, 5, 0, 0, 3, 6],
            [7, 0, 3, 0, 1, 8, 0, 0, 0]
        ])
        # Second fixture puzzle (the classic example grid).
        self.sudoku2 = Sudoku([
            [5, 3, 0, 0, 7, 0, 0, 0, 0],
            [6, 0, 0, 1, 9, 5, 0, 0, 0],
            [0, 9, 8, 0, 0, 0, 0, 6, 0],
            [8, 0, 0, 0, 6, 0, 0, 0, 3],
            [4, 0, 0, 8, 0, 3, 0, 0, 1],
            [7, 0, 0, 0, 2, 0, 0, 0, 6],
            [0, 6, 0, 0, 0, 0, 2, 8, 0],
            [0, 0, 0, 4, 1, 9, 0, 0, 5],
            [0, 0, 0, 0, 8, 0, 0, 7, 9]
        ])
    def test_Should_return_true_on_isPossible_method_call_1(self):
        """Placing 7 at (2, 2) violates no row/column/box constraint."""
        self.assertEqual(True, self.sudoku.isPossible(2, 2, value=7))
    def test_Should_return_true_on_isPossible_method_call_2(self):
        """Placing 9 at (7, 3) violates no row/column/box constraint."""
        self.assertEqual(True, self.sudoku.isPossible(7, 3, value=9))
    def test_Should_return_false_on_isPossible_method_call_1(self):
        """Placing 1 at (3, 5) conflicts with an existing digit."""
        self.assertEqual(False, self.sudoku.isPossible(3, 5, value=1))
    def test_Should_return_false_on_isPossible_method_call_2(self):
        """Placing 2 at (2, 7) conflicts with an existing digit."""
        self.assertEqual(False, self.sudoku.isPossible(2, 7, value=2))
    def test_Should_return_false_on_isPossible_method_call_3(self):
        """Placing 1 at (1, 3) conflicts with an existing digit."""
        self.assertEqual(False, self.sudoku.isPossible(1, 3, value=1))
    def test_Should_return_solved_sudoku_1(self):
        """solve() fills the first fixture to its unique solution."""
        self.sudoku.solve()
        self.assertEqual(self.sudoku.board, [
            [4, 3, 5, 2, 6, 9, 7, 8, 1],
            [6, 8, 2, 5, 7, 1, 4, 9, 3],
            [1, 9, 7, 8, 3, 4, 5, 6, 2],
            [8, 2, 6, 1, 9, 5, 3, 4, 7],
            [3, 7, 4, 6, 8, 2, 9, 1, 5],
            [9, 5, 1, 7, 4, 3, 6, 2, 8],
            [5, 1, 9, 3, 2, 6, 8, 7, 4],
            [2, 4, 8, 9, 5, 7, 1, 3, 6],
            [7, 6, 3, 4, 1, 8, 2, 5, 9]
        ])
    def test_Should_return_solved_sudoku_2(self):
        """solve() fills the second fixture to its unique solution."""
        self.sudoku2.solve()
        self.assertEqual(self.sudoku2.board, [
            [5, 3, 4, 6, 7, 8, 9, 1, 2],
            [6, 7, 2, 1, 9, 5, 3, 4, 8],
            [1, 9, 8, 3, 4, 2, 5, 6, 7],
            [8, 5, 9, 7, 6, 1, 4, 2, 3],
            [4, 2, 6, 8, 5, 3, 7, 9, 1],
            [7, 1, 3, 9, 2, 4, 8, 5, 6],
            [9, 6, 1, 5, 3, 7, 2, 8, 4],
            [2, 8, 7, 4, 1, 9, 6, 3, 5],
            [3, 4, 5, 2, 8, 6, 1, 7, 9]
        ])
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
import argparse
from .suite import *
from .synth import *
from .real import *
def parse_arguments():
    """Build and parse the command-line arguments for a benchmark session."""
    arg_parser = argparse.ArgumentParser()
    # Saving and loading session data are mutually exclusive modes.
    save_load = arg_parser.add_mutually_exclusive_group()
    save_load.add_argument("--save", help="File path to store the session data.")
    save_load.add_argument("--load", help="File path to load and plot session data.")
    # Figure display options are likewise mutually exclusive.
    figures = arg_parser.add_mutually_exclusive_group()
    figures.add_argument("--tight", help="Show tight figures.", action="store_true")
    figures.add_argument("--no-display", help="Don't display any figures.", action="store_true")
    arg_parser.add_argument(
        "--runs", type=int, default=1000,
        help="Number of runs each scenario is instantiated.")
    arg_parser.add_argument(
        "--datasets-prefix", default="data",
        help="Specifies the prefix folder holding all datasets. If no single folder exists, consider creating one with the aid of symbolic links.")
    arg_parser.add_argument(
        "--print-mode", default=None, choices=["console", "latex"],
        help="Specializes the printing to console to generate LaTeX friendly tables.")
    return arg_parser.parse_args()
|
r"""@package motsfinder.utils
General utilities for simplifying certain tasks in Python.
"""
from __future__ import print_function
import functools
import importlib.util
from builtins import range, map
from tempfile import NamedTemporaryFile
from glob import glob
import os
import os.path as op
import re
import subprocess
import time
from timeit import default_timer
import datetime
from contextlib import contextmanager
import numpy as np
from .lockfile import LockFile
__all__ = [
"get_git_commit",
"import_file_as_module",
"lmap",
"lrange",
"isiterable",
"get_chunks",
"parallel_compute",
"process_pool",
"merge_dicts",
"update_dict",
"insert_missing",
"print_indented",
"timethis",
"cache_method_results",
"find_file",
"find_files",
"update_user_data_in_file",
]
def get_git_commit():
    r"""Return `git describe` output identifying the current source version.

    Note that `git` has to be installed and the project source needs to be
    in a git repository at runtime.
    """
    source_dir = op.dirname(op.realpath(__file__))
    described = subprocess.check_output(
        ["git", "describe", "--always", "--dirty"], cwd=source_dir)
    return described.decode('utf-8').strip()
def import_file_as_module(fname, mname='loaded_module'):
    r"""Load the Python file `fname` and return it as a module object.

    Any classes or functions defined in the file can then be retrieved as
    attributes of the returned module, which allows e.g. configuration
    files to be loaded and used at runtime.
    """
    module_spec = importlib.util.spec_from_file_location(mname, fname)
    loaded = importlib.util.module_from_spec(module_spec)
    module_spec.loader.exec_module(loaded)
    return loaded
def lmap(func, *iterables):
    r"""Like the builtin `map`, but eagerly returns a list."""
    # zip stops at the shortest iterable, matching map's behavior.
    return [func(*values) for values in zip(*iterables)]
def lrange(*args):
    r"""Shorthand for ``list(range(*args))``."""
    return [*range(*args)]
def isiterable(obj):
    """Return True if `obj` supports iteration.

    Note that strings are iterable too, which you may or may not intend
    to check for.
    """
    try:
        iter(obj)
        return True
    except TypeError:
        return False
def cmp_versions(version1, version2):
    r"""Compare two version strings loosely.

    The strings are split at "." and the corresponding parts compared as
    integers. Non-integers count as 0. This allows a rough version comparison
    like ``cmp_versions(mpl.__version__, "3.0")``. Optionally, either version
    can be a list to skip the conversion step.

    @return `-1` if `version1` is smaller than `version2`, `1` if it's larger,
        and 0 if both are equal (within the loose comparison).
    """
    def _to_int(part):
        # FIX: the original used a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; only conversion errors count as 0.
        try:
            return int(part)
        except (TypeError, ValueError):
            return 0
    if isinstance(version1, str):
        version1 = [_to_int(p) for p in version1.split(".")]
    if isinstance(version2, str):
        version2 = [_to_int(p) for p in version2.split(".")]
    if version1 == version2:
        return 0
    return -1 if version1 < version2 else 1
def get_chunks(items, chunksize):
    """Yield successive `chunksize`-sized slices of `items`.

    Non-sequence iterables are materialized first so they can be sliced.
    """
    if not isinstance(items, (list, tuple)):
        items = list(items)
    start = 0
    while start < len(items):
        yield items[start:start + chunksize]
        start += chunksize
def parallel_compute(func, arg_list, args=(), kwargs=None, processes=None,
                     callstyle='plain', pool=None):
    r"""Perform a task on a list of arguments in parallel.

    This uses multiple processes to perform the computation of `func` once for
    each element of `arg_list` in parallel. This is *not* multithreading but
    multiprocessing, which means that all objects (including any `self`
    attached to `func` in case that is a bound object method) must be
    picklable for cross-process transfer.

    This function assumes that copying the necessary objects is expensive and
    hence it does not use a processing pool to optimally distribute the tasks
    (as that would copy the object bound to `func` on each call). Instead, the
    argument list is chopped up into `N` tasks, where `N==processes` (if
    given) is the number of parallel processes to invoke. Once all processes
    have finished, the results are collected and returned in the order they
    were passed in `arg_list`.

    @param func
        Callable to invoke for each element of `arg_list`. May be a bound
        method in which case `self` must be picklable.
    @param arg_list
        Iterable of arguments to let `func` work on. Each element may be the
        argument itself, or a list/tuple of arguments, or a dictionary with
        keyword arguments. The interpretation is configured using `callstyle`.
    @param args
        Additional positional arguments to pass to `func` in each call.
    @param kwargs
        Additional keyword arguments to pass to `func` in each call.
    @param processes
        Number of processes to run in parallel. If not given, uses the number
        of available threads of the current architecture.
    @param callstyle
        How to pass the individual arguments in `arg_list` to `func`.
        Default is `plain`. The values mean: `plain` to pass the argument as
        first positional argument, `list` to expand the argument into a list
        and pass the elements as individual positional arguments, or `dict` to
        treat the elements as dictionaries to pass as keyword arguments to
        `func`.
    @param pool
        Optional pool to re-use. If not given, a new pool is created for the
        given number of parallel tasks. Note that the child processes are
        terminated only if the pool was created within this function. Any
        supplied `pool` will be left as-is.
    """
    styles = ['plain', 'list', 'dict']
    if callstyle not in styles:
        raise ValueError("Unknown callstyle '%s'. Valid styles: %s"
                         % (callstyle, ", ".join(styles)))
    # NOTE(review): _FuncWrap is defined elsewhere in this module (not shown
    # here); presumably it adapts `func` to the chosen callstyle — confirm.
    f = _FuncWrap(func, callstyle, args=args, kwargs=kwargs or dict())
    if processes is None:
        # Number of CPUs this process may run on (Linux-specific call).
        processes = len(os.sched_getaffinity(0))
    if processes <= 1:
        # Nothing to parallelize; run sequentially in this process.
        return list(map(f, arg_list))
    # Distribute arguments round-robin: argument k goes to chunk k % processes.
    chunks = [[] for i in range(processes)]
    for items in list(get_chunks(arg_list, processes)):
        for i, a in enumerate(items):
            chunks[i].append(a)
    runners = [_Runner(f, chunk) for chunk in chunks]
    with process_pool(processes=processes, pool=pool) as p:
        workers = [p.apply_async(runner, ()) for runner in runners]
        results = [None] * len(arg_list)
        # Undo the round-robin distribution: worker i's j-th result belongs
        # to input argument i + j*processes.
        for i, worker in enumerate(workers):
            worker_results = worker.get()
            for j, result in enumerate(worker_results):
                results[i+j*processes] = result
    return results
@contextmanager
def process_pool(processes=None, pool=None):
    r"""Provide a multiprocessing pool, creating one only when needed.

    @param processes
        Number of worker processes for a newly created pool. Defaults to
        the number of CPUs available to the current process.
    @param pool
        If given, that pool is yielded unchanged and is *not* terminated
        when the context exits.
    """
    if pool is not None:
        # An externally supplied pool stays under the caller's control.
        yield pool
        return
    from multiprocessing import Pool
    worker_count = len(os.sched_getaffinity(0)) if processes is None else processes
    with Pool(processes=worker_count) as created_pool:
        yield created_pool
class _Runner():
r"""Class to store a function and argument list chunk."""
__slots__ = ("_f", "_arg_chunk",)
def __init__(self, func, arg_chunk):
r"""Create a runner object for a given chunk of arguments."""
self._f = func
self._arg_chunk = arg_chunk
def __call__(self):
r"""Call the function on each of the arguments and return the result."""
return [self._f(arg) for arg in self._arg_chunk]
class _FuncWrap():
r"""Helper class to interpret arguments and call a function."""
__slots__ = ("_f", "_callstyle", "_args", "_kwargs")
def __init__(self, func, callstyle, args, kwargs):
r"""Create a function wrapper.
@param func
Function to use.
@param callstyle
Callstyle for invoking `func` (see parallel_compute()).
@param args
Positional arguments for `func` (see parallel_compute()).
@param kwargs
Keyword arguments for `func` (see parallel_compute()).
"""
self._f = func
self._callstyle = callstyle
self._args = args
self._kwargs = kwargs
def __call__(self, args):
r"""Call the function with the given args interpreted as configured."""
if self._callstyle == 'plain':
return self._f(args, *self._args, **self._kwargs)
if self._callstyle == 'list':
return self._f(*args, *self._args, **self._kwargs)
return self._f(*self._args, **args, **self._kwargs)
def merge_dicts(*dicts):
    """Shallowly merge dicts; later values replace earlier ones.

    Note that only shallow copies are made of the dicts.

    @b Examples
    ```
    a = dict(a=1, b=2, c=3)
    b = dict(c=-3, d=-4)
    c = merge_dicts(a, b)
    # Result: dict(a=1, b=2, c=-3, d=-4)
    ```
    """
    return {key: value for d in dicts for key, value in d.items()}
def update_dict(dict_arg, **kwargs):
    r"""Return a shallow copy of `dict_arg` with `kwargs` merged in.

    The original dict is left untouched; `kwargs` override existing keys.
    """
    return {**dict_arg, **kwargs}
def insert_missing(dict_arg, **kwargs):
    """Return a copy of `dict_arg` extended by any missing `kwargs`.

    Existing keys of `dict_arg` take precedence; the original dict is not
    altered.
    """
    return {**kwargs, **dict_arg}
def print_indented(prefix, obj):
    r"""Print a prefix followed by an object, indenting continuation lines.

    Printing an object whose string representation spans multiple lines
    looks ugly when prefixed by another string:

    \code
    >>> A = np.array([[1,2,3],[4,5,6],[7,8,9]])
    >>> print("A = %s" % A)
    A = [[1 2 3]
    [4 5 6]
    [7 8 9]]
    \endcode

    With this function, we can do instead:

    \code
    >>> A = np.array([[1,2,3],[4,5,6],[7,8,9]])
    >>> print_indented("A = ", A)
    A = [[1 2 3]
         [4 5 6]
         [7 8 9]]
    \endcode

    @param prefix
        String to print before the object, or an int meaning that many
        spaces.
    @param obj
        Any object; its `str()` representation is printed.
    """
    if isinstance(prefix, int):
        prefix = " " * prefix
    pad = " " * len(prefix)
    lines = (prefix + str(obj)).splitlines()
    indented = [lines[0]] + [pad + line for line in lines[1:]]
    print("\n".join(indented))
@contextmanager
def printoptions(*args, **kwargs):
    r"""Temporarily override the numpy print options.

    All arguments are forwarded to `numpy.set_printoptions()`; the previous
    settings are restored when the context exits, even on error. Currently
    accepted parameters include: precision, threshold, edgeitems, linewidth,
    suppress, nanstr, infstr, formatter.

    @b Examples
    \code
    import numpy as np
    from motsfinder.utils import printoptions
    with printoptions(precision=3, suppress=True, linewidth=120):
        print(np.random.rand(7, 7))
    \endcode
    """
    saved_options = np.get_printoptions()
    try:
        np.set_printoptions(*args, **kwargs)
        yield
    finally:
        np.set_printoptions(**saved_options)
@contextmanager
def timethis(start_msg=None, end_msg="Elapsed time: {}", silent=False, eol=True):
    r"""Context manager for timing code execution.

    @param start_msg
        Message printed before the body runs. The placeholder ``'{now}'``
        is replaced by the current date and time. Passing `True` selects
        the default ``"Started: {now}"``.
    @param end_msg
        Message printed afterwards; ``'{}'`` receives the elapsed time.
        Default is ``"Elapsed time: {}"``.
    @param silent
        If `True`, print nothing at all. Useful when a function has a
        verbosity setting to conditionally time its results.
    @param eol
        If `False`, suppress the newline after the start message so the
        elapsed time appears on the same line.
    """
    if silent:
        yield
        return
    if start_msg is True:
        start_msg = "Started: {now}"
    if start_msg is not None:
        now = time.strftime('%Y-%m-%d %H:%M:%S')
        line_end = '\n' if eol else ''
        # Flush when staying on the same line so the message shows up
        # before the (possibly long-running) body executes.
        print(start_msg.format(now=now), end=line_end, flush=not eol)
    begin = default_timer()
    try:
        yield
    finally:
        if end_msg is not None:
            elapsed = datetime.timedelta(seconds=default_timer()-begin)
            print(end_msg.format(elapsed))
def cache_method_results(key=None):
    r"""Create a decorator to cache instance method results.

    The decorator is rather simplistic and the overhead is comparable to a
    few extra arithmetic operations. It creates a new attribute
    ``'_method_cache'`` on the instance holding one dict per cached method,
    each mapping argument tuples to results.

    @b Limitations
    Due to its simplicity:
        * does not work for classes with slots unless the
          ``'_method_cache'`` slot is added
        * cached methods cannot be called with keyword args (you may use a
          non-cached wrapper method) or non-hashable types
        * the cache is not limited to a certain size
        * if a cached method of a subclass has the same name as a cached
          method of a super class, you need to specify the `key` argument
          to set a unique key for the method

    @param key
        Unique key to store the method's results in. By default, the method
        name is used (obtained via ``method.__name__``).

    @b Examples
    ```
    class MyClass():
        @cache_method_results()
        def some_lengthy_computation(self, a, b, c):
            result = a**2 + b**3 + c**4
            return result
    ```
    """
    def cache_decorator(method):
        cache_key = key if key is not None else method.__name__
        @functools.wraps(method)
        def wrapper(self, *args):
            # Lazily create the per-instance cache container.
            try:
                caches = self._method_cache
            except AttributeError:
                caches = self._method_cache = dict()
            cache = caches.setdefault(cache_key, dict())
            if args in cache:
                return cache[args]
            result = cache[args] = method(self, *args)
            return result
        return wrapper
    return cache_decorator
def save_to_file(filename, data, overwrite=False, verbose=True,
                 showname='data', mkpath=True):
    r"""Save an object to disk.

    This uses `numpy.save()` to store an object in a file. Use
    load_from_file() to restore the data afterwards.

    This operation is atomic for ``overwrite=True``: any failure during
    saving leaves the original file untouched. This may happen e.g. when
    the data to save is not picklable or when classes have been reloaded
    but the object to be saved is of the "old version" of that class.

    @param filename
        The file name to store the data in. An extension ``'.npy'`` will be
        added if not already there.
    @param overwrite
        Whether to overwrite an existing file with the same name. If `False`
        (default) and such a file exists, a `RuntimeError` is raised. This is
        not completely atomic, since we first write the data to a temporary
        file, which is then renamed to the destination.
    @param verbose
        Whether to print when the file was written. Default is `True`.
    @param showname
        Name to print in the confirmation message in case `verbose==True`.
    @param mkpath
        If the parent folder(s) of the given filename don't exist, they are
        created if ``mkpath==True`` (default). Otherwise, an error is raised.

    @b Notes
    The data will be put into a 1-element list to avoid creating
    0-dimensional numpy arrays.
    """
    filename = op.expanduser(filename)
    if not filename.endswith('.npy'):
        filename += '.npy'
    path = op.abspath(op.normpath(op.dirname(filename)))
    if mkpath:
        os.makedirs(path, exist_ok=True)
    if op.exists(filename) and not overwrite:
        raise RuntimeError("File already exists.")
    tname = None
    try:
        # Write to a temporary file in the destination folder first, so the
        # final `os.replace()` is an atomic rename on the same filesystem.
        with NamedTemporaryFile(dir=path, delete=False) as tfile:
            tname = tfile.name
            np.save(tfile, [data])
        # Check again, since writing may have taken some time. Note that this
        # is not truly atomic, i.e. multiple processes writing to the same
        # filename have a (low) chance to overwrite each other's data here
        # even with `overwrite=False`.
        if op.exists(filename) and not overwrite:
            raise RuntimeError("File already exists.")
        # Silently overwrite an existing file
        os.replace(tname, filename)
        if verbose:
            print("%s saved to: %s" % (showname, filename))
    finally:
        if tname is not None:
            try:
                os.unlink(tname)  # clean up after any failures
            except OSError:
                # Usually the temp file was already renamed away. Only OS
                # errors are ignored here; the previous bare `except:` would
                # also have swallowed KeyboardInterrupt/SystemExit.
                pass
def load_from_file(filename, allow_pickle=True, retries=0, sleep=5,
                   verbose=False, **kw):
    r"""Load an object from disk.

    If the object had been stored using save_to_file(), the result should be
    a perfect copy of the object.

    @param allow_pickle
        Passed to `numpy.load()` to allow loading objects stored in the file.
    @param retries
        How often to retry in case of failure. Default is `0`, i.e. fail
        immediately if something goes wrong.
    @param sleep
        Seconds to wait between trying. Default is `5`.
    @param verbose
        If `True`, print messages when loading fails. Default is `False`.
    @param **kw
        Further keyword arguments are passed to `numpy.load()`.

    @b Notes
    This assumes the object is the only element of a list stored in the
    file, which will be the case if the file was created using
    save_to_file(). If the data is not a single-element list, it is
    returned as is.
    """
    filename = op.expanduser(filename)
    attempt = 0
    while True:
        try:
            result = np.load(filename, allow_pickle=allow_pickle, **kw)
            break
        except Exception as e:
            # Deliberately broad: any load failure is retried until the
            # retry budget is exhausted.
            if attempt >= retries:
                raise
            if verbose:
                print("Could not load %s" % filename)
                print("%s: %s" % (type(e).__name__, e))
                print(" ... will retry in %s seconds" % sleep)
            time.sleep(sleep)
            attempt += 1
    if result.shape == (1,):
        # save_to_file() wraps the data in a 1-element list; unwrap it.
        return result[0]
    # Not a single value. Return as is.
    return result
def update_user_data_in_file(fname, data, keys_to_remove=(), retries=10,
                             sleep=10, verbose=True):
    r"""Update the `user_data` of an object stored in a file.

    This method is safe to call in concurrent situations even across nodes
    accessing the file via NFS: the file is first locked against other
    processes/nodes, then loaded, its data updated, and written back before
    the lock is released.

    @param fname
        Filename of the object to update.
    @param data
        Dictionary with entries to add to/replace in the object's
        `user_data` dictionary.
    @param keys_to_remove
        Optional list of keys to delete from the `user_data`. Keys in this
        list should not also appear in `data`.
    @param retries
        Number of times loading the file is retried in case access fails
        for some reason. Default is `10`.
    @param sleep
        Seconds to sleep between trying to load the file. Default is `10`.
    @param verbose
        Whether to print messages about waiting for a foreign lock and
        about the updated file being written. Default is `True`.
    """
    verbosity = 1 if verbose else 0
    with LockFile(fname, verbosity=verbosity):
        obj = load_from_file(fname, retries=retries, sleep=sleep,
                             verbose=verbose)
        for removed_key in keys_to_remove:
            obj.user_data.pop(removed_key, None)
        obj.user_data.update(data)
        obj.save(fname, overwrite=True)
def find_file(pattern, recursive=False, skip_regex=None, regex=None,
              load=False, full_output=False, verbose=False):
    r"""Locate (and optionally load) a single file matching a glob pattern.

    Thin wrapper around find_files() with ``max_num=1``. When loading, a
    file is ignored if it exists but contains no data (i.e. `None` was
    saved).

    @param pattern
        Shell glob pattern. May contain ``'**'`` if `recursive=True`.
    @param recursive
        Activate recursive wildcard ``'**'`` in `pattern`. Default `False`.
    @param skip_regex
        Files matching this regex will be ignored.
    @param regex
        If given, only files matching this regex will be considered.
    @param load
        If `True`, load the file and return the data. Default is `False`.
    @param full_output
        If `load=True`, return both the data and filename. Default is
        `False`, i.e. only return the data. Ignored if `load=False`.
    @param verbose
        Print a note in case data was loaded from a file.

    @throws FileNotFoundError if no matching file is found.
    """
    matches = find_files(
        pattern, recursive=recursive, skip_regex=skip_regex, regex=regex,
        load=load, full_output=full_output, max_num=1, verbose=verbose,
    )
    if not matches:
        raise FileNotFoundError("No data found for pattern: %s" % pattern)
    return matches[0]
def find_files(pattern, recursive=False, skip_regex=None, regex=None,
               load=False, full_output=False, max_num=None, verbose=False):
    r"""Collect files matching a glob pattern and optional regex filters.

    Optionally, all matching files can be loaded. In this case, files are
    ignored if they contain no data (i.e. `None` was saved).

    @param pattern
        Shell glob pattern. May contain ``'**'`` if `recursive=True`.
    @param recursive
        Activate recursive wildcard ``'**'`` in `pattern`. Default `False`.
    @param skip_regex
        Files matching this regex will be ignored.
    @param regex
        If given, only files matching this regex will be considered.
    @param load
        If `True`, load the files and return the data. Default is `False`.
    @param full_output
        If `load=True`, return both the data and filenames. Default is
        `False`, i.e. only return the data. Ignored if `load=False`.
    @param max_num
        Maximum number of files to collect. Default is to collect all
        matching files.
    @param verbose
        Print a note in case data was loaded from a file.
    """
    collected = []
    for name in glob(pattern, recursive=recursive):
        if skip_regex and re.search(skip_regex, name):
            continue
        if regex and not re.search(regex, name):
            continue
        if not load:
            collected.append(name)
        else:
            data = load_from_file(name)
            if not data:
                # Files storing no data (`None`) are silently skipped.
                continue
            if verbose:
                print("Data loaded: %s" % name)
            collected.append((data, name) if full_output else data)
        if max_num and len(collected) == max_num:
            break
    return collected
|
from django.db.models import F, OrderBy
class OrderableAggMixin:
    """Mixin adding an ORDER BY clause inside an aggregate function's SQL.

    The ordering expressions are stored alongside the aggregate's source
    expressions (see get/set_source_expressions) so they participate in
    expression resolution like any other expression, but are compiled into
    an ``ORDER BY ...`` fragment passed to the aggregate's own as_sql().
    """
    def __init__(self, *expressions, ordering=(), **extra):
        # Accept a single ordering expression or a list/tuple of them.
        if not isinstance(ordering, (list, tuple)):
            ordering = [ordering]
        ordering = ordering or []
        # Transform minus sign prefixed strings into an OrderBy() expression.
        ordering = (
            (OrderBy(F(o[1:]), descending=True) if isinstance(o, str) and o[0] == '-' else o)
            for o in ordering
        )
        super().__init__(*expressions, **extra)
        self.ordering = self._parse_expressions(*ordering)
    def resolve_expression(self, *args, **kwargs):
        # Resolve the ordering expressions in place before the aggregate
        # itself is resolved.
        self.ordering = [expr.resolve_expression(*args, **kwargs) for expr in self.ordering]
        return super().resolve_expression(*args, **kwargs)
    def as_sql(self, compiler, connection):
        if self.ordering:
            ordering_params = []
            ordering_expr_sql = []
            for expr in self.ordering:
                expr_sql, expr_params = compiler.compile(expr)
                ordering_expr_sql.append(expr_sql)
                ordering_params.extend(expr_params)
            sql, sql_params = super().as_sql(compiler, connection, ordering=(
                'ORDER BY ' + ', '.join(ordering_expr_sql)
            ))
            # Ordering params come last: the ORDER BY clause is rendered
            # after the aggregate's own arguments in the SQL string.
            return sql, (*sql_params, *ordering_params)
        return super().as_sql(compiler, connection, ordering='')
    def set_source_expressions(self, exprs):
        # Extract the ordering expressions because ORDER BY clause is handled
        # in a custom way.
        self.ordering = exprs[self._get_ordering_expressions_index():]
        return super().set_source_expressions(exprs[:self._get_ordering_expressions_index()])
    def get_source_expressions(self):
        # Expose the ordering expressions too, so query transformations
        # (e.g. resolving references) visit them as well.
        return super().get_source_expressions() + self.ordering
    def _get_ordering_expressions_index(self):
        """Return the index at which the ordering expressions start."""
        source_expressions = self.get_source_expressions()
        return len(source_expressions) - len(self.ordering)
|
"""
Import json data from URL to Datababse
"""
import requests
import json
import os
from ....product.models import ProductVariant
from django.core.management.base import BaseCommand
from datetime import datetime
from ....settings import PROJECT_ROOT
from prices import Money
import decimal
class Command(BaseCommand):
    """Fetch NBU exchange rates and reprice all known product variants.

    Downloads the official exchange-rate JSON from bank.gov.ua, stores it
    as a local resource file, then recomputes ``price_override`` and
    ``cost_price`` (in UAH, using the PLN rate) for every product variant
    listed in the local ``stocks.json``.
    """
    def import_currencies(self):
        """Download current exchange rates and update variant prices."""
        raw = (requests.get('https://bank.gov.ua/NBUStatService/v1/statdirectory/exchange?json')).text
        data_folder = os.path.join(PROJECT_ROOT, 'saleor','api_par_com', 'resources', 'json_file')
        json_path = os.path.join(data_folder, 'currency.json')
        # Context manager ensures the handle is closed even on write errors
        # (previously the file was opened and closed manually).
        with open(json_path, 'w') as output_file:
            output_file.write(raw)
        #################################################################
        ####              Variant Currencies values updating         ####
        #################################################################
        rate = None
        with open(os.path.join(data_folder, "currency.json"), encoding='utf-8', errors='ignore') as currency_file:
            currency = json.loads(currency_file.read())
            for currency_object in currency:
                if currency_object['cc'] == 'PLN':
                    rate = round(decimal.Decimal(currency_object['rate']), 3)
        if rate is None:
            # Without a PLN rate no price can be converted; fail loudly
            # instead of crashing later with a NameError.
            raise ValueError("PLN exchange rate not found in NBU response.")
        #################################################################
        ####              Product Variant creating                   ####
        #################################################################
        with open(os.path.join(data_folder, "stocks.json"), encoding='utf-8') as stock_file:
            stock = json.loads(stock_file.read())
            for stock_object in stock['produkty']['produkt']:
                price_ov = decimal.Decimal(stock_object['cena_katalogowa'])
                cost_price = decimal.Decimal(stock_object['cena_po_rabacie'])
                sku = stock_object['kod']
                print(price_ov * rate)
                print(cost_price * rate)
                stocks_update = {
                    "price_override": Money(price_ov * rate, 'UAH'),
                    "cost_price": Money(cost_price * rate, 'UAH'),
                }
                # Use a distinct name: rebinding `stock` here would shadow
                # the JSON document currently being iterated.
                variant = ProductVariant.objects.get(sku=sku)
                for key, value in stocks_update.items():
                    setattr(variant, key, value)
                variant.save()
                display_format = "\nCurrency, {}, has been edited."
                print(display_format.format(variant))
    def handle(self, *args, **options):
        """
        Makes a GET request to the API.
        """
        self.import_currencies()
|
import warnings
import torch
import torch.fx
import torch.fx.experimental.fx_acc.acc_ops as acc_ops
def trt_transposed_matmul(lhs: torch.Tensor, rhs: torch.Tensor, lhs_transposed: bool, rhs_transposed: bool):
    """Matmul in which either operand may first swap its last two dimensions."""
    left = lhs.transpose(-1, -2) if lhs_transposed else lhs
    right = rhs.transpose(-1, -2) if rhs_transposed else rhs
    return torch.matmul(left, right)
def fuse_permute_matmul(gm: torch.fx.GraphModule):
    """
    Fuse pattern like permute + matmul if permute is transposing the last two dimension.

    Matching permute nodes are folded into a single `trt_transposed_matmul`
    call; the now-unused permutes are removed by dead-code elimination.
    """
    def check_permute(node: torch.fx.Node):
        # A permute is fusible iff it only swaps the last two dimensions and
        # its output feeds exactly one consumer (the matmul being rewritten).
        ranks = len(node.meta["tensor_meta"].shape)
        permutation = list(i % ranks for i in node.kwargs["permutation"])  # type: ignore[union-attr]
        allowed_permutation = list(i for i in range(ranks))
        allowed_permutation[-1] = ranks - 2
        allowed_permutation[-2] = ranks - 1
        return len(node.users) == 1 and permutation == allowed_permutation
    for node in gm.graph.nodes:
        if node.target == acc_ops.matmul:
            lhs, rhs = node.kwargs["input"], node.kwargs["other"]
            # Fixed misspelled local (`rhs_tranposed`) for consistency with
            # the lhs counterpart and with trt_transposed_matmul's signature.
            lhs_transposed = rhs_transposed = False
            if lhs.target == acc_ops.permute and check_permute(lhs):
                lhs_transposed = True
                lhs = lhs.kwargs["input"]
            if rhs.target == acc_ops.permute and check_permute(rhs):
                rhs_transposed = True
                rhs = rhs.kwargs["input"]
            if lhs_transposed or rhs_transposed:
                with gm.graph.inserting_before(node):
                    fused_node = gm.graph.call_function(
                        trt_transposed_matmul,
                        args=(lhs, rhs, lhs_transposed, rhs_transposed),
                    )
                node.replace_all_uses_with(fused_node)
    gm.graph.eliminate_dead_code()
    gm.recompile()
    return gm
# TensorRT is an optional dependency: register the fx2trt converter for the
# fused op only when the TensorRT stack can be imported.
try:
    import tensorrt as trt
    from torch.fx.experimental.fx2trt.fx2trt import tensorrt_converter
except Exception:
    warnings.warn("Unable to import TensorRT related libraries.")
else:
    @tensorrt_converter(trt_transposed_matmul)
    def trt_transposed_matmul_converter(network, target, args, kwargs, name):
        # Map the fused op onto TensorRT's matrix-multiply layer, which can
        # transpose either operand natively via MatrixOperation.TRANSPOSE.
        lhs, rhs, lhs_transposed, rhs_transposed = args
        for i in [lhs, rhs]:
            if not isinstance(i, trt.tensorrt.ITensor):
                raise RuntimeError(
                    f"trt_transposed_matmul received input {i} that is not part "
                    "of the TensorRT region!"
                )
        layer = network.add_matrix_multiply(
            lhs,
            trt.MatrixOperation.TRANSPOSE if lhs_transposed else trt.MatrixOperation.NONE,
            rhs,
            trt.MatrixOperation.TRANSPOSE if rhs_transposed else trt.MatrixOperation.NONE,
        )
        layer.name = name
        return layer.get_output(0)
|
import numpy as np
import pybullet as p
import time
import matplotlib.pyplot as plt
from cep.envs import Tiago_LeftParallelHand_Base
from cep.cep_models import cep_tiago_base_pathplan_x
import torch
# Safety margin subtracted from every joint limit below.
joint_limit_buffers = 0.02
# Upper limits for the 10 controlled joints, shrunk by the buffer.
# NOTE(review): units presumably radians (and meters for the base) — confirm
# against the robot model.
joint_limits = np.array([2.5, 2.5, 3.1416, 2.75, 1.57, 3.53, 2.35, 2.09, 1.57, 2.09]) - joint_limit_buffers
device = torch.device('cpu')  # all torch computation stays on CPU
class CEPPolicy():
    """Policy querying a CEP controller and integrating its acceleration output.

    The controller produces joint accelerations; `step()` integrates them
    with one explicit-Euler step into velocities and positions.
    """
    def __init__(self, dt=1 / 240., dtype='float64'):
        self.dt = dt
        self.dtype = dtype
        self.controller = cep_tiago_base_pathplan_x()
    def policy(self, state):
        """Return (positions, velocities) for the next control step."""
        positions = state[0, 0:10]
        velocities = state[0, 10:]
        accelerations = self.controller.policy(state)
        return self.step(positions, velocities, accelerations)
    def step(self, joint_poses, joint_vels, joint_accs):
        """One explicit-Euler integration step of the joint state."""
        new_vels = joint_vels + joint_accs * self.dt
        new_poses = joint_poses + new_vels * self.dt
        return new_poses, new_vels
def experiment():
    '''Run repeated CEP path-planning trials in the Tiago pybullet env.

    Executes `n_trials` episodes of `horizon` simulation steps each. Every
    step queries the CEP policy for a position-control action, advances the
    environment, and sleeps so wall-clock time roughly tracks simulation
    time. The reward (distance) and end position of each trial's final step
    are printed.
    '''
    time_step = 1 / 240.
    env = Tiago_LeftParallelHand_Base(time_step=time_step)
    policy = CEPPolicy(dt=time_step)
    ################
    n_trials = 100
    horizon = 5000
    c = 0
    s = 0
    REWARD = 0
    END_POSITION = None
    for itr in range(n_trials):
        print('###Iteration: {}'.format(itr))
        state = env.reset()
        # Draw a red reference line on the floor for visual guidance.
        p.addUserDebugLine([0., 0., -0.189], [1.5, 0., -0.189], [1., 0., 0.])
        q_list = []
        for i in range(horizon):
            init = time.time()
            #### Get Control Action (Position Control)####
            a = policy.policy(state)
            state, reward, done, success, q_vals = env.step(a)
            #print(state)
            # TODO: Record joint values 07.10
            #q_list.append(q_vals)
            #############################
            end = time.time()
            # Sleep the remainder of the step (clipped to [0, time_step]) so
            # the simulation runs in approximately real time.
            time.sleep(np.clip(time_step - (end - init), 0, time_step))
            if i == (horizon-1):
                # Report the terminal state of this trial.
                REWARD = reward
                END_POSITION = env.check_endPosition()
                print('Position state: ', state[0])
                print('Distance:', REWARD)
                print('End position: ', END_POSITION)
                print('Desired position', env.Target_pos)
        #plot_joints(q_list, horizon)
    p.disconnect()
if __name__ == '__main__':
    # Connect to a shared pybullet GUI server (white background), position
    # the debug camera, then run the trials.
    p.connect(p.GUI_SERVER, 1234,
              options='--background_color_red=1. --background_color_green=1. --background_color_blue=1.')
    p.resetDebugVisualizerCamera(2.2, 55.6, -47.4, [0.04, 0.06, 0.31])
    experiment()
|
from launch import LaunchDescription
from launch_ros.actions import Node
def generate_launch_description():
    """Launch the ackermann joystick teleop node together with a joy node."""
    teleop_node = Node(
        package='ackermann_controller',
        executable='ackermann_teleop_joy',
        parameters=[
            {'max_speed': 1},
            {'max_steering_angle': 1},
        ],
    )
    joystick_node = Node(
        package='joy',
        executable='joy_node',
    )
    return LaunchDescription([teleop_node, joystick_node])
import random
import math
import typing
from misc import module_utils
import torch
from torch import nn
from torch.nn import functional
from torch.nn import init
from torchvision import models
def default_conv(
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int=1,
        padding: typing.Optional[int]=None,
        bias=True,
        padding_mode: str='zeros'):
    '''Construct a 2D convolution with "same"-style padding by default.

    When `padding` is not given it defaults to ``(kernel_size - 1) // 2``,
    which preserves the spatial size for odd kernels at stride 1.
    '''
    effective_padding = (kernel_size - 1) // 2 if padding is None else padding
    return nn.Conv2d(
        in_channels,
        out_channels,
        kernel_size,
        stride=stride,
        padding=effective_padding,
        bias=bias,
        padding_mode=padding_mode,
    )
def get_model(m, cfg, *args, make=True, conv=default_conv, **kwargs):
    '''
    Automatically find a class implementation and instantiate it.

    Args:
        m (str): Name of the module.
        cfg (Namespace): Global configurations.
        args (list): Additional arguments for the model class.
        make (bool, optional): If set to False, return model class itself.
        conv (callable, optional): Conv layer factory passed to get_kwargs().
        kwargs (dict): Additional keyword arguments for the model class.

    Raises:
        NotImplementedError: If no matching class implementation exists.
    '''
    model_class = module_utils.find_representative(m)
    if model_class is None:
        raise NotImplementedError('The model class is not implemented!')
    # Classes may declare their own construction kwargs via a get_kwargs()
    # hook; otherwise the caller-provided kwargs are used.
    if hasattr(model_class, 'get_kwargs'):
        model_kwargs = model_class.get_kwargs(cfg, conv=conv)
    else:
        model_kwargs = kwargs
    if not make:
        return model_class
    return model_class(*args, **model_kwargs)
def model_class(model_cls, cfg=None, make=True, conv=default_conv):
    '''Instantiate `model_cls` via its get_kwargs() hook, if it defines one.

    Returns the class itself (uninstantiated) when `make` is False or when
    the class does not provide `get_kwargs`.
    '''
    can_instantiate = make and hasattr(model_cls, 'get_kwargs')
    if not can_instantiate:
        return model_cls
    return model_cls(**model_cls.get_kwargs(cfg, conv=conv))
def init_gans(target):
    '''Initialize conv/linear weights of `target` DCGAN-style.

    Weights are drawn from N(0, 0.02); biases, where present, are zeroed.
    '''
    for module in target.modules():
        if isinstance(module, (nn.modules.conv._ConvNd, nn.Linear)):
            module.weight.data.normal_(0.0, 0.02)
            if getattr(module, 'bias', None) is not None:
                module.bias.data.zero_()
def append_module(m, name, n_feats):
    '''Append the norm or activation layer named by `name` to the list `m`.

    `name` may be a norm ('batch'/'layer'/'instance'), an activation
    ('relu'/'lrelu'/'prelu'), or None / unknown (nothing is appended).
    '''
    if name is None:
        return
    norm_factories = {
        'batch': lambda: nn.BatchNorm2d(n_feats),
        'layer': lambda: nn.GroupNorm(1, n_feats),
        'instance': lambda: nn.InstanceNorm2d(n_feats),
    }
    act_factories = {
        'relu': lambda: nn.ReLU(True),
        'lrelu': lambda: nn.LeakyReLU(negative_slope=0.2, inplace=True),
        'prelu': lambda: nn.PReLU(),
    }
    if name in norm_factories:
        m.append(norm_factories[name]())
    elif name in act_factories:
        m.append(act_factories[name]())
class MeanShift(nn.Conv2d):
    '''
    Re-normalize input w.r.t. a given mean and std.

    This module assumes the input lies in between -1 ~ 1.
    '''
    def __init__(self, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
        '''
        Default values are ImageNet mean and std.
        '''
        super().__init__(3, 3, kernel_size=1)
        mean_t = torch.Tensor(mean)
        std_t = torch.Tensor(std)
        # Channel-wise 1x1 conv implementing y = (0.5*x + 0.5 - mean) / std,
        # i.e. mapping [-1, 1] input to the normalized range.
        self.weight.data.copy_(torch.diag(0.5 / std_t).view(3, 3, 1, 1))
        self.bias.data.copy_((0.5 - mean_t) / std_t)
        # The normalization is fixed; never train these parameters.
        for param in self.parameters():
            param.requires_grad = False
class BasicBlock(nn.Sequential):
    '''
    Make a basic block which consists of Conv-(Norm)-(Act).

    Args:
        in_channels (int): Conv in_channels.
        out_channels (int): Conv out_channels.
        kernel_size (int): Conv kernel_size.
        stride (int, default=1): Conv stride.
        norm (<None> or 'batch' or 'layer'): Norm function.
        act (<'relu'> or 'lrelu' or 'prelu'): Activation function.
        conv (funcion, optional): A function for making a conv layer.
    '''
    def __init__(
            self,
            in_channels: int,
            out_channels: int,
            kernel_size: int,
            stride: int=1,
            padding: typing.Optional[int]=None,
            norm: typing.Optional[str]=None,
            act: typing.Optional[str]='relu',
            bias: bool=None,
            padding_mode: str='zeros',
            conv=default_conv):
        # A norm layer makes the conv bias redundant, so the bias defaults
        # to off whenever a norm is requested.
        if bias is None:
            bias = norm is None
        layers = [conv(
            in_channels,
            out_channels,
            kernel_size,
            bias=bias,
            stride=stride,
            padding=padding,
            padding_mode=padding_mode,
        )]
        for extra in (norm, act):
            append_module(layers, extra, out_channels)
        super().__init__(*layers)
class BasicTBlock(BasicBlock):
    '''BasicBlock variant built on transposed convolutions.'''
    def __init__(self, *args, **kwargs):
        # Force the conv factory regardless of what the caller passed.
        super().__init__(*args, **{**kwargs, 'conv': nn.ConvTranspose2d})
class ResBlock(nn.Sequential):
    '''
    Make a residual block which consists of Conv-(Norm)-Act-Conv-(Norm).

    Args:
        n_feats (int): Conv in/out_channels.
        kernel_size (int): Conv kernel_size.
        norm (<None> or 'batch' or 'layer'): Norm function.
        act (<'relu'> or 'lrelu' or 'prelu'): Activation function.
        res_scale (float, optional): Residual scaling.
        res_prob (float, optional): Probability of keeping the residual
            branch for a given training step (stochastic depth).
        conv (funcion, optional): A function for making a conv layer.

    Note:
        Residual scaling:
            From Szegedy et al.,
            "Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning"
            See https://arxiv.org/pdf/1602.07261.pdf for more detail.
        To modify stride, change the conv function.
    '''
    def __init__(
            self,
            n_feats: int,
            kernel_size: int,
            norm: typing.Optional[str]=None,
            act: str='relu',
            res_scale: float=1,
            res_prob: float=1,
            padding_mode: str='zeros',
            conv=default_conv) -> None:
        use_bias = norm is None
        layers = []
        for conv_idx in range(2):
            layers.append(conv(
                n_feats,
                n_feats,
                kernel_size,
                bias=use_bias,
                padding_mode=padding_mode,
            ))
            append_module(layers, norm, n_feats)
            # No activation after the second conv (pre-addition design).
            if conv_idx == 0:
                append_module(layers, act, n_feats)
        super().__init__(*layers)
        self.res_scale = res_scale
        self.res_prob = res_prob
        return
    def forward(self, x):
        # Stochastic depth: occasionally skip the residual branch entirely
        # while training.
        if self.training and random.random() > self.res_prob:
            return x
        return x + self.res_scale * super().forward(x)
class Upsampler(nn.Sequential):
    '''
    Make an upsampling block using sub-pixel convolution.

    Args:
        scale (int): Upsampling factor (a power of two, or 3).
        n_feats (int): Number of feature channels.
        norm, act: Optional norm/activation appended after each shuffle.

    Note:
        From Shi et al.,
        "Real-Time Single Image and Video Super-Resolution
        Using an Efficient Sub-pixel Convolutional Neural Network"
        See https://arxiv.org/pdf/1609.05158.pdf for more detail
    '''
    def __init__(
            self,
            scale: int,
            n_feats: int,
            norm: typing.Optional[str]=None,
            act: typing.Optional[str]=None,
            bias: bool=True,
            padding_mode: str='zeros',
            conv=default_conv):
        # NOTE(review): the `bias` argument is immediately overridden here;
        # kept to preserve the original behavior.
        bias = norm is None
        layers = []
        log_scale = math.log(scale, 2)
        # check if the scale is power of 2
        if int(log_scale) == log_scale:
            # Stack one x2 sub-pixel stage per power of two.
            for _ in range(int(log_scale)):
                layers.append(conv(
                    n_feats,
                    4 * n_feats,
                    3,
                    bias=bias,
                    padding_mode=padding_mode,
                ))
                layers.append(nn.PixelShuffle(2))
                append_module(layers, norm, n_feats)
                append_module(layers, act, n_feats)
        elif scale == 3:
            # Single x3 sub-pixel stage.
            layers.append(conv(
                n_feats,
                9 * n_feats,
                3,
                bias=bias,
                padding_mode=padding_mode,
            ))
            layers.append(nn.PixelShuffle(3))
            append_module(layers, norm, n_feats)
            append_module(layers, act, n_feats)
        else:
            raise NotImplementedError
        super().__init__(*layers)
class UpsamplerI(nn.Module):
    '''
    Interpolation based upsampler.

    Upsamples by repeated x2 interpolation, each followed by a conv and
    (optionally) a leaky ReLU.
    '''
    def __init__(
            self, scale, n_feats, algorithm='nearest', activation=True, conv=default_conv):
        super().__init__()
        n_stages = int(math.log(scale, 2))
        self.algorithm = algorithm
        self.activation = activation
        self.convs = nn.ModuleList(
            [conv(n_feats, n_feats, 3) for _ in range(n_stages)]
        )
    def forward(self, x):
        for stage_conv in self.convs:
            x = functional.interpolate(x, scale_factor=2, mode=self.algorithm)
            x = stage_conv(x)
            if self.activation:
                x = functional.leaky_relu(x, negative_slope=0.2, inplace=True)
        return x
class PixelSort(nn.Module):
    '''
    An inverse operation of nn.PixelShuffle. Only for scale 2.
    '''
    def __init__(self):
        super().__init__()
    def forward(self, x):
        '''
        Tiling input into smaller resolutions.

        Args:
            x (Tensor): input of shape (B, C, H, W); H and W are assumed
                even (not validated here).

        Return:
            Tensor: shape (B, 4C, H/2, W/2).

        Example::
            >>> x = torch.Tensor(16, 64, 256, 256)
            >>> ps = PixelSort()
            >>> y = ps(x)
            >>> y.size()
            torch.Size([16, 256, 128, 128])
        '''
        # Strided slicing keeps this jit-traceable; a view/permute based
        # implementation previously hit a jit compatibility issue.
        even = slice(0, None, 2)
        odd = slice(1, None, 2)
        quadrants = (
            x[..., even, even],   # top-left
            x[..., even, odd],    # top-right
            x[..., odd, even],    # bottom-left
            x[..., odd, odd],     # bottom-right
        )
        return torch.cat(quadrants, dim=1)
class Downsampler(nn.Sequential):
    '''Downsampling block: PixelSort (x1/2) followed by a conv, repeated.

    Only power-of-two scales are supported.
    '''
    def __init__(
            self, scale, n_feats,
            norm=None, act=None, conv=default_conv):
        use_bias = norm is None
        log_scale = math.log(scale, 2)
        if int(log_scale) != log_scale:
            raise NotImplementedError
        layers = []
        for _ in range(int(log_scale)):
            layers.append(PixelSort())
            # PixelSort quadruples the channel count; the conv reduces it back.
            layers.append(conv(4 * n_feats, n_feats, 3, bias=use_bias))
            append_module(layers, norm, n_feats)
            append_module(layers, act, n_feats)
        super().__init__(*layers)
def extract_vgg(name):
    '''Build a VGG19 feature extractor truncated at the layer encoded in
    *name* (two digits: pooling stage, then conv index within that stage,
    e.g. "54" -> conv5_4), preceded by mean-shift normalization.

    Falls back to the "54" configuration when no known code appears in
    *name*.
    '''
    features = models.vgg19(pretrained=True).features
    configs = (
        '11', '12',
        '21', '22',
        '31', '32', '33', '34',
        '41', '42', '43', '44',
        '51', '52', '53', '54',
    )
    sub_mean = MeanShift()

    def truncate(config):
        target_pools = int(config[0])
        target_convs = int(config[1])
        collected = [sub_mean]
        pool_idx = 0
        conv_idx = 0
        for layer in features:
            if target_convs == 0:
                # Degenerate request: no convolutions wanted at all.
                return sub_mean
            collected.append(layer)
            if isinstance(layer, nn.Conv2d):
                conv_idx += 1
            elif isinstance(layer, nn.MaxPool2d):
                conv_idx = 0
                pool_idx += 1
            if conv_idx == target_convs and pool_idx == target_pools - 1:
                return nn.Sequential(*collected)

    vgg = None
    for config in configs:
        if config in name:
            vgg = truncate(config)
            break
    if vgg is None:
        vgg = truncate('54')
    return vgg
def extract_resnet(name):
    '''Return a pretrained ResNet feature extractor (classifier removed,
    global-average-pooled) preceded by mean-shift normalization.

    The depth (18/34/50/101/152) is parsed from *name*; defaults to
    resnet50 when none matches.
    '''
    depth_options = ('18', '34', '50', '101', '152')
    factory = models.resnet50
    for depth in depth_options:
        if depth in name:
            factory = getattr(models, 'resnet{}'.format(depth))
            break
    backbone = factory(pretrained=True)
    backbone.avgpool = nn.AdaptiveAvgPool2d(1)
    # Identity head: the network now emits pooled features, not logits.
    backbone.fc = nn.Identity()
    backbone.eval()
    return nn.Sequential(MeanShift(), backbone)
if __name__ == '__main__':
    # Earlier JIT smoke test for Downsampler, kept for reference:
    # torch.set_printoptions(precision=3, linewidth=120)
    # with torch.no_grad():
    #     x = torch.arange(64).view(1, 1, 8, 8).float()
    #     ps = Downsampler(2, 1)
    #     print(ps(x))
    #     from torch import jit
    #     jit_traced = jit.trace(ps, x)
    #     print(jit_traced.graph)
    #     print(jit_traced)
    #     jit_traced.save('jit_test.pt')
    #     jit_load = jit.load('jit_test.pt')
    #     print(jit_load(x))

    # Quick manual check of MeanShift on random input in [-1, 1).
    sample = 2 * torch.rand(1, 3, 4, 4) - 1
    print(sample)
    ms = MeanShift()
    print(ms(sample))
|
from brownie import ERC20Basic, config, accounts
def deployContract():
    '''Deploy the ERC20Basic contract with an initial supply of 100,000,000
    from the account whose key is configured under wallets.from_key.'''
    deployer = accounts.add(config["wallets"]["from_key"])
    ERC20Basic.deploy(100000000, {'from': deployer})
def main():
    '''Brownie entry point: run the deployment.'''
    deployContract()
|
#! /usr/bin/env python
#
# Copyright (c) 2019 Daw Lab
# https://dawlab.princeton.edu/
import os, sys
from setuptools import setup, find_packages
path = os.path.abspath(os.path.dirname(__file__))

## Metadata
DISTNAME = 'sisyphus'
MAINTAINER = 'Sam Zorowitz'
MAINTAINER_EMAIL = 'zorowitz@princeton.edu'
DESCRIPTION = 'Evaluating sequential choice biases in anxiety disorders'
URL = 'https://dawlab.princeton.edu/'
LICENSE = 'MIT'
DOWNLOAD_URL = 'https://github.com/ndawlab/seqanx'

# The long description comes straight from the README.
with open(os.path.join(path, 'README.rst'), encoding='utf-8') as fh:
    README = fh.read()

# Parse requirements.txt, ignoring any commented-out lines.
with open(os.path.join(path, 'requirements.txt')) as fh:
    requirements = [line for line in fh.read().splitlines()
                    if not line.startswith('#')]

# Single-source the version from the package's __init__.py.
VERSION = None
with open(os.path.join('sisyphus', '__init__.py'), 'r') as fh:
    for line in (raw.strip() for raw in fh):
        if line.startswith('__version__'):
            VERSION = line.split('=')[1].strip().strip('\'')
            break
if VERSION is None:
    raise RuntimeError('Could not determine version')

setup(name=DISTNAME,
      maintainer=MAINTAINER,
      maintainer_email=MAINTAINER_EMAIL,
      description=DESCRIPTION,
      url=URL,
      version=VERSION,
      download_url=DOWNLOAD_URL,
      long_description=README,
      packages=find_packages(exclude=['docs', 'tests']),
      install_requires=requirements,
      license=LICENSE)
"""Dummy command workers module for various test cases."""
|
"""Utility functions for MongoDB document insertion, updates and retrieval."""
from typing import (Any, List, Mapping, Optional)
from bson.objectid import ObjectId
from pymongo.collection import ReturnDocument
from pymongo import collection as Collection
def find_one_latest(collection: Collection) -> Optional[Mapping[Any, Any]]:
    """Return the newest object in the collection, stripped of its object
    id, or None when the collection is empty.
    """
    newest = collection.find(
        {},
        {'_id': False}
    ).sort([('_id', -1)]).limit(1)
    try:
        return newest.next()
    except StopIteration:
        return None
def find_id_latest(collection: Collection) -> Optional[ObjectId]:
    """Return the object id of the newest object, or None when the
    collection is empty.
    """
    try:
        newest = collection.find().sort([('_id', -1)]).limit(1).next()
        return newest['_id']
    except StopIteration:
        return None
def update_run_state(
    collection: Collection,
    task_id: str,
    state: str = 'UNKNOWN'
) -> Optional[Mapping[Any, Any]]:
    """Update the state of a workflow run and return the updated document
    (None when no run matches *task_id*)."""
    query = {'task_id': task_id}
    update = {'$set': {'api.state': state}}
    return collection.find_one_and_update(
        query,
        update,
        return_document=ReturnDocument.AFTER
    )
def upsert_fields_in_root_object(
    collection: Collection,
    task_id: str,
    root: str,
    **kwargs
) -> Optional[Mapping[Any, Any]]:
    """Insert (or update) fields in(to) the same root (object) field and
    return the updated document.
    """
    # Dotted keys address nested fields: root 'a' + key 'b' -> 'a.b'.
    fields = {
        '.'.join([root, key]): value
        for (key, value) in kwargs.items()
    }
    return collection.find_one_and_update(
        {'task_id': task_id},
        {'$set': fields},
        return_document=ReturnDocument.AFTER
    )
def update_tes_task_state(
    collection: Collection,
    task_id: str,
    tes_id: str,
    state: str
) -> Optional[Mapping[Any, Any]]:
    """Update the `state` field of the TES task log entry matching *tes_id*
    and return the updated document."""
    # `$elemMatch` selects the log entry; the positional `$` updates it.
    query = {'task_id': task_id, 'api.task_logs': {'$elemMatch': {'id': tes_id}}}
    update = {'$set': {'api.task_logs.$.state': state}}
    return collection.find_one_and_update(
        query,
        update,
        return_document=ReturnDocument.AFTER
    )
def append_to_tes_task_logs(
    collection: Collection,
    task_id: str,
    tes_log: Mapping,
) -> Optional[Mapping[Any, Any]]:
    """Append *tes_log* to the run's TES task logs and return the updated
    document."""
    update = {'$push': {'api.task_logs': tes_log}}
    return collection.find_one_and_update(
        {'task_id': task_id},
        update,
        return_document=ReturnDocument.AFTER
    )
def find_tes_task_ids(
    collection: Collection,
    run_id: str
) -> List:
    """Return the distinct TES task ids associated with the given run."""
    query = {'run_id': run_id}
    return collection.distinct('api.task_logs.id', query)
|
# -*- coding=UTF-8 -*-
# pyright: strict
from __future__ import annotations
from collections import defaultdict
from copy import deepcopy
from typing import (
TYPE_CHECKING,
Any,
Callable,
DefaultDict,
Dict,
Iterable,
List,
Sequence,
Set,
Tuple,
TypeVar,
)
from ... import mathtools, app
from ...constants import Mood, TrainingType
from .. import condition
from ..context import Context
from ..race import Race
from ..training import Training
from .effect import Effect
from .globals import g
if TYPE_CHECKING:
from .item import Item
# Registry of effect reducers; populated via @_register_reducer below.
_effect_reducers: List[_EffectReducer] = []
class Buff:
    """A rate modifier with a lifetime and replacement rules.

    rate: contribution to a BuffList's total rate.
    turn_count: turns the buff remains relevant.
    priority: resolves conflicts between buffs sharing a unique_key.
    unique_key: buffs with equal keys replace each other (None = stack).
    """

    def __init__(
        self,
        rate: float,
        *,
        turn_count: int,
        priority: int,
        unique_key: Any,
    ) -> None:
        self.unique_key = unique_key
        self.priority = priority
        self.turn_count = turn_count
        self.rate = rate
class BuffList:
    """Container of Buff objects enforcing per-key uniqueness.

    A buff with a non-None unique_key replaces any stored buff with the
    same key, unless a stored one has strictly higher priority (then the
    new buff is dropped). Keyless buffs always stack.
    """

    def __init__(self, v: Iterable[Buff] = ()) -> None:
        self._l: List[Buff] = list(v)

    def __iter__(self):
        return iter(self._l)

    def __len__(self):
        return len(self._l)

    def __bool__(self):
        return len(self) > 0

    def add(self, v: Buff) -> None:
        if v.unique_key is None:
            self._l.append(v)
            return
        blocked = any(
            existing.unique_key == v.unique_key and existing.priority > v.priority
            for existing in self._l
        )
        if blocked:
            return
        kept = [existing for existing in self._l if existing.unique_key != v.unique_key]
        self._l = [*kept, v]

    def total_rate(self) -> float:
        return sum(buff.rate for buff in self)
T = TypeVar("T", bound=Buff)
def _estimate_failure_rate(ctx: Context, trn: Training) -> float:
    """Estimate the training's failure chance from current vitality.

    Vitality is scaled to [0, 10000] and interpolated over piecewise-linear
    breakpoints; wisdom trainings use a more forgiving curve.
    """
    if trn.wisdom > 0:
        curve = (
            (0, 0.85),
            (1500, 0.7),
            (4000, 0.0),
        )
    else:
        curve = (
            (0, 0.99),
            (1500, 0.8),
            (3000, 0.5),
            (5000, 0.15),
            (7000, 0.0),
        )
    return mathtools.interpolate(int(ctx.vitality * 10000), curve)
class EffectSummary:
    """Accumulated effects of one or more applied items.

    Collects flat property/vitality/mood gains, condition changes,
    training-level shifts and buff/debuff lists, and can apply them to
    Context/Training/Race objects to predict their post-item state.
    """

    def __init__(self) -> None:
        self.speed = 0
        # NOTE(review): "statmia" looks like a misspelling of "stamina";
        # kept as-is because the reducers below use the same attribute name.
        self.statmia = 0
        self.power = 0
        self.guts = 0
        self.wisdom = 0
        self.vitality = 0
        self.max_vitality = 0
        self.mood = 0
        # Condition ids added/removed by the items.
        self.condition_add: Set[int] = set()
        self.condition_remove: Set[int] = set()
        # Per-training-type level deltas.
        self.training_levels: DefaultDict[TrainingType, int] = defaultdict(lambda: 0)
        # Multiplicative buffs on training effectiveness, per type.
        self.training_effect_buff: DefaultDict[TrainingType, BuffList] = defaultdict(
            BuffList
        )
        # Multiplicative modifiers on training vitality cost, per type.
        self.training_vitality_debuff: DefaultDict[
            TrainingType, BuffList
        ] = defaultdict(BuffList)
        self.training_partner_reassign = False
        self.training_no_failure = False
        self.race_fan_buff = BuffList()
        self.race_reward_buff = BuffList()
        self.support_friendship = 0
        # SELECT t1."index", t1.text, t2.text from text_data as t1 LEFT JOIN text_data as t2 WHERE t1.category == 6 AND t2."index" == t1."index" AND t2.category = 170;
        # 9002 = 秋川理事長 (Chairwoman Akikawa)
        self.character_friendship: Dict[int, int] = {}
        # Effects no registered reducer recognized vs. those consumed.
        self.unknown_effects: Sequence[Effect] = ()
        self.known_effects: Sequence[Effect] = ()

    def add(self, item: Item, age: int = 0):
        """Fold *item*'s effects into this summary.

        Effects whose turn_count is below *age* are skipped. The first
        reducer in _effect_reducers that consumes an effect records it as
        known; effects no reducer understands are kept as unknown.
        """
        for effect in item.effects:
            if effect.turn_count < age:
                continue
            for i in _effect_reducers:
                if i(item, effect, self):
                    self.known_effects = (
                        *self.known_effects,
                        effect,
                    )
                    break
            else:
                self.unknown_effects = (
                    *self.unknown_effects,
                    effect,
                )

    def clone(self) -> EffectSummary:
        """Return a deep, independent copy of this summary."""
        return deepcopy(self)

    def apply_to_training(self, ctx: Context, training: Training) -> Training:
        """
        return a copy of given training with effect applied.

        Gains are scaled by the mood change and by the per-type effect
        buffs; the failure rate is re-estimated from the post-effect
        context and clamped to [0, 1].
        """
        t_after = training.clone()
        explain = ""
        ctx_after = self.apply_to_context(ctx)
        effect_rate = 1
        # mood
        r = ctx_after.mood.training_rate - ctx.mood.training_rate
        if r:
            explain += f"{r*100:+.0f}% by mood;"
            effect_rate *= 1 + r
        # buff
        r = self.training_effect_buff[training.type].total_rate()
        if r:
            explain += f"{r*100:+.0f}% by buff;"
            effect_rate *= 1 + r
        r = effect_rate
        if r != 1:
            # Scale every stat gain by the combined multiplier.
            t_after.speed = round(t_after.speed * r)
            t_after.stamina = round(t_after.stamina * r)
            t_after.power = round(t_after.power * r)
            t_after.guts = round(t_after.guts * r)
            t_after.wisdom = round(t_after.wisdom * r)
        # vitality debuff
        r = self.training_vitality_debuff[training.type].total_rate()
        if r:
            explain += f"{r*100:+.0f}% vitality;"
            t_after.vitality *= 1 + r
        # Failure-rate delta from the changed context, clamped so the
        # resulting rate stays within [0, 1].
        f_before = _estimate_failure_rate(ctx, training)
        f_after = _estimate_failure_rate(ctx_after, t_after)
        f = mathtools.clamp(
            f_after - f_before, -training.failure_rate, 1 - training.failure_rate
        )
        if f:
            explain += f"{f*100:+.0f}% failure;"
            t_after.failure_rate += f
        if self.training_no_failure:
            explain += f"no failure;"
            t_after.failure_rate = 0
        if explain and g.explain_effect_summary:
            app.log.text(
                "apply to training: %s->%s: %s" % (training, t_after, explain),
                level=app.DEBUG,
            )
        assert 0.0 <= t_after.failure_rate <= 1.0, t_after.failure_rate
        return t_after

    def reduce_on_training(self, training: Training) -> Tuple[Training, EffectSummary]:
        """Reduce effect for item score sample (remove buff so we can apply other conflicted buff)."""
        es_remains = self.clone()
        t_before = training.clone()
        explain = ""
        # effect buff: divide the scaled stats back out and drop the buff
        # from the remaining summary.
        effect_rate = 1
        r = es_remains.training_effect_buff[t_before.type].total_rate()
        del es_remains.training_effect_buff[t_before.type]
        if r:
            explain += f"{r*100:+.0f}% by buff;"
            effect_rate *= 1 + r
        r = effect_rate
        if r != 1:
            t_before.speed = round(t_before.speed / r)
            t_before.stamina = round(t_before.stamina / r)
            t_before.power = round(t_before.power / r)
            t_before.guts = round(t_before.guts / r)
            t_before.wisdom = round(t_before.wisdom / r)
        # vitality debuff: likewise inverted and removed.
        r = es_remains.training_vitality_debuff[t_before.type].total_rate()
        del es_remains.training_vitality_debuff[t_before.type]
        if r:
            explain += f"{r*100:+.0f}% vitality;"
            t_before.vitality /= 1 + r
        if explain and g.explain_effect_summary:
            app.log.text(
                "revert from training: %s->%s: %s" % (training, t_before, explain),
                level=app.DEBUG,
            )
        return t_before, es_remains

    def apply_to_race(self, ctx: Context, race: Race) -> Race:
        """Return a copy of *race* with fan and reward buffs applied."""
        r_after = race.clone()
        explain = ""
        r = self.race_fan_buff.total_rate()
        if r:
            explain = f"{r*100:+.0f}% fans;"
            r_after.fan_counts = tuple(round(i * (1 + r)) for i in r_after.fan_counts)
        r = self.race_reward_buff.total_rate()
        if r:
            explain = f"{r*100:+.0f}% reward;"
            # NOTE(review): "raward_buff" mirrors the attribute name on
            # Race — presumably a typo carried through the codebase.
            r_after.raward_buff += r
        if explain and g.explain_effect_summary:
            app.log.text("apply to race: %s: %s" % (race, explain), level=app.DEBUG)
        return r_after

    def apply_to_context(self, ctx: Context) -> Context:
        """Return a copy of *ctx* with stat, mood, vitality, training-level
        and condition effects applied (all values clamped to valid ranges).
        """
        ctx_after = ctx.clone()
        explain = ""
        # mood: shift within the ordered Mood enum, clamped to its ends.
        all_moods = list(Mood)
        i_before = all_moods.index(ctx.mood)
        i_after = mathtools.clamp(i_before + self.mood, 0, len(all_moods) - 1)
        if i_before != i_after:
            ctx_after.mood = all_moods[i_after]
            explain += f"mood {ctx.mood} -> {ctx_after.mood};"
        # Stats are clamped to the game's property range.
        min_property = 1
        max_property = 1200
        if self.speed:
            ctx_after.speed = mathtools.clamp(
                ctx.speed + self.speed, min_property, max_property
            )
            explain += f"{ctx_after.speed - ctx.speed:+d} speed;"
        if self.statmia:
            ctx_after.stamina = mathtools.clamp(
                ctx.stamina + self.statmia, min_property, max_property
            )
            explain += f"{ctx_after.stamina - ctx.stamina:+d} stamina;"
        if self.power:
            ctx_after.power = mathtools.clamp(
                ctx.power + self.power, min_property, max_property
            )
            explain += f"{ctx_after.power - ctx.power:+d} power;"
        if self.guts:
            ctx_after.guts = mathtools.clamp(
                ctx.guts + self.guts, min_property, max_property
            )
            explain += f"{ctx_after.guts - ctx.guts:+d} guts;"
        if self.wisdom:
            ctx_after.wisdom = mathtools.clamp(
                ctx.wisdom + self.wisdom, min_property, max_property
            )
            explain += f"{ctx_after.wisdom - ctx.wisdom:+d} wisdom;"
        if self.max_vitality:
            ctx_after.max_vitality = mathtools.clamp(
                ctx.max_vitality + self.max_vitality, 1, 150
            )
            explain += f"{ctx_after.max_vitality - ctx.max_vitality:+d} max vitality;"
        if self.vitality:
            # Vitality is stored as a fraction of max_vitality.
            ctx_after.vitality = mathtools.clamp(
                ctx.vitality + self.vitality / ctx.max_vitality, 0.0, 1.0
            )
            explain += f"{ctx_after.vitality - ctx.vitality:+.2f} vitality;"
        for t, lv_effect in self.training_levels.items():
            lv_before = ctx.training_levels.get(t)
            if not lv_before:
                # ignore if training level unknown
                continue
            lv_after = mathtools.clamp(lv_before + lv_effect, 1, 5)
            ctx_after.training_levels[t] = lv_after
            explain += f"{lv_after - lv_before:+d} {t.name} training level;"
        c = self.condition_add.difference(ctx.conditions)
        if c:
            explain += f"add condition {','.join(condition.get(i).name for i in c)};"
            ctx_after.conditions.update(self.condition_add)
        c = self.condition_remove.intersection(ctx.conditions)
        if c:
            explain += f"remove condition {','.join(condition.get(i).name for i in c)};"
            ctx_after.conditions.difference_update(c)
        if explain and g.explain_effect_summary:
            app.log.text("apply to context: %s" % explain, level=app.DEBUG)
        return ctx_after
if TYPE_CHECKING:
    # Shape of a reducer: inspects one Effect coming from an Item and, if
    # it understands it, folds it into the EffectSummary and returns True.
    _EffectReducer = Callable[[Item, Effect, EffectSummary], bool]
def _only_effect_type(effect_type: int):
def _wrapper(fn: _EffectReducer) -> _EffectReducer:
def _func(item: Item, effect: Effect, summary: EffectSummary) -> bool:
if effect.type != effect_type:
return False
return fn(item, effect, summary)
return _func
return _wrapper
def _register_reducer(fn: _EffectReducer):
    """Decorator: add *fn* to the module-wide reducer registry and return
    it unchanged."""
    _effect_reducers.append(fn)
    return fn
@_register_reducer
@_only_effect_type(Effect.TYPE_PROPERTY)
def _(item: Item, effect: Effect, summary: EffectSummary):
    """Accumulate flat property gains (speed/stamina/power/guts/wisdom,
    vitality, max vitality, mood) into the summary.

    Returns True when the property code is recognized.
    """
    prop, value, _, _ = effect.values
    if prop == Effect.PROPERTY_SPEED:
        summary.speed += value
        return True
    if prop == Effect.PROPERTY_STAMINA:
        # NOTE: EffectSummary spells this attribute "statmia".
        summary.statmia += value
        return True
    if prop == Effect.PROPERTY_POWER:
        summary.power += value
        return True
    if prop == Effect.PROPERTY_GUTS:
        summary.guts += value
        return True
    if prop == Effect.PROPERTY_WISDOM:
        summary.wisdom += value
        return True
    # Fix: a second, unreachable PROPERTY_STAMINA branch that duplicated
    # the one above was removed here.
    if prop == Effect.PROPERTY_MAX_VITALITY:
        summary.max_vitality += value
        return True
    if prop == Effect.PROPERTY_VITALITY:
        summary.vitality += value
        return True
    if prop == Effect.PROPERTY_MOOD:
        summary.mood += value
        return True
    return False
@_register_reducer
@_only_effect_type(Effect.TYPE_TRAINING_LEVEL)
def _(item: Item, effect: Effect, summary: EffectSummary):
    """Accumulate training-level changes for the matching training type."""
    lv, value, _, _ = effect.values

    def _bump(t: TrainingType) -> bool:
        summary.training_levels[t] = summary.training_levels.get(t, 0) + value
        return True

    if lv == Effect.TRAINING_LEVEL_SPEED:
        return _bump(TrainingType.SPEED)
    if lv == Effect.TRAINING_LEVEL_STAMINA:
        return _bump(TrainingType.STAMINA)
    if lv == Effect.TRAINING_LEVEL_GUTS:
        return _bump(TrainingType.GUTS)
    if lv == Effect.TRAINING_LEVEL_POWER:
        return _bump(TrainingType.POWER)
    if lv == Effect.TRAINING_LEVEL_WISDOM:
        return _bump(TrainingType.WISDOM)
    return False
@_register_reducer
@_only_effect_type(Effect.TYPE_CONDITION)
def _(item: Item, effect: Effect, summary: EffectSummary):
    """Record conditions the item adds to or removes from the character."""
    action, value, _, _ = effect.values
    target = {
        Effect.CONDITION_ADD: summary.condition_add,
        Effect.CONDITION_REMOVE: summary.condition_remove,
    }.get(action)
    if target is None:
        return False
    target.add(value)
    return True
@_register_reducer
@_only_effect_type(Effect.TYPE_TRAINING_PARTNER_REASSIGN)
def _(item: Item, effect: Effect, summary: EffectSummary):
    """Flag a training-partner reassign; only the all-zero payload is known."""
    if effect.values != (0, 0, 0, 0):
        return False
    summary.training_partner_reassign = True
    return True
@_register_reducer
@_only_effect_type(Effect.TYPE_TRAINING_BUFF)
def _(item: Item, effect: Effect, summary: EffectSummary):
    """Register a training-effectiveness buff for one or all training types.

    A type code of 0 applies the buff to every training type.
    """
    tp, value, _, _ = effect.values

    def _buff(t: TrainingType) -> None:
        summary.training_effect_buff[t].add(
            Buff(
                value / 100,
                turn_count=effect.turn_count,
                priority=item.effect_priority,
                unique_key=tp,
            )
        )

    if tp == 0:
        for t in (
            TrainingType.SPEED,
            TrainingType.STAMINA,
            TrainingType.POWER,
            TrainingType.GUTS,
            TrainingType.WISDOM,
        ):
            _buff(t)
        return True
    # NOTE(review): no single-type WISDOM code is handled here, matching
    # the original branch set — confirm against the game data if needed.
    type_by_code = {
        Effect.TRAINING_LEVEL_SPEED: TrainingType.SPEED,
        Effect.TRAINING_LEVEL_STAMINA: TrainingType.STAMINA,
        Effect.TRAINING_LEVEL_POWER: TrainingType.POWER,
        Effect.TRAINING_LEVEL_GUTS: TrainingType.GUTS,
    }
    if tp in type_by_code:
        _buff(type_by_code[tp])
        return True
    return False
@_register_reducer
@_only_effect_type(Effect.TYPE_TRAINING_VITALITY_DEBUFF)
def _(item: Item, effect: Effect, summary: EffectSummary):
    """Register a vitality-cost modifier for a single training type."""
    tp, value, _, _ = effect.values
    type_by_code = {
        Effect.TRAINING_LEVEL_SPEED: TrainingType.SPEED,
        Effect.TRAINING_LEVEL_STAMINA: TrainingType.STAMINA,
        Effect.TRAINING_LEVEL_POWER: TrainingType.POWER,
        Effect.TRAINING_LEVEL_GUTS: TrainingType.GUTS,
    }
    if tp not in type_by_code:
        return False
    summary.training_vitality_debuff[type_by_code[tp]].add(
        Buff(
            value / 100,
            turn_count=effect.turn_count,
            priority=item.effect_priority,
            unique_key=tp,
        )
    )
    return True
@_register_reducer
@_only_effect_type(Effect.TYPE_TRAINING_NO_FAILURE)
def _(item: Item, effect: Effect, summary: EffectSummary):
    """Flag guaranteed training success; only the all-zero payload is known."""
    if effect.values != (0, 0, 0, 0):
        return False
    summary.training_no_failure = True
    return True
@_register_reducer
@_only_effect_type(Effect.TYPE_RACE_BUFF)
def _(item: Item, effect: Effect, summary: EffectSummary):
    """Register race reward or fan-count multipliers."""
    tp, value, _, _ = effect.values
    buff_list = {
        Effect.RACE_BUFF_REWARD: summary.race_reward_buff,
        Effect.RACE_BUFF_FAN: summary.race_fan_buff,
    }.get(tp)
    if buff_list is None:
        return False
    buff_list.add(
        Buff(
            value / 100,
            turn_count=1,
            priority=item.effect_priority,
            unique_key=tp,
        )
    )
    return True
@_register_reducer
@_only_effect_type(Effect.TYPE_FRIENDSHIP)
def _(item: Item, effect: Effect, summary: EffectSummary):
    """Accumulate friendship gains for support cards or a specific character."""
    tp, c, value, _ = effect.values
    if tp == Effect.FRIENDSHIP_SUPPORT:
        # Support friendship always comes with a zero character id.
        assert c == 0, 0
        summary.support_friendship += value
        return True
    if tp == Effect.FRIENDSHIP_CHARACTER:
        before = summary.character_friendship.get(c, 0)
        summary.character_friendship[c] = before + value
        return True
    return False
|
import json
from datetime import datetime as dt
import requests
from django.contrib import admin
from django.conf import settings
from django import forms
from .models import History
class HistoryAdminForm(forms.ModelForm):
    """Admin form for History records, exposing every model field.

    Fix: the previous no-op __init__ override (which only forwarded to
    super) was removed as dead code.
    """

    class Meta:
        model = History
        widgets = {}
        fields = '__all__'
class HistoryAdmin(admin.ModelAdmin):
    """Admin configuration for History records."""
    form = HistoryAdminForm
    # Columns shown in the changelist view.
    list_display = ('title', 'file', 'field', 'category', 'create_time')
    # Fields matched by the admin search box.
    search_fields = ('title', 'file',)


admin.site.register(History, HistoryAdmin)
|
#!/usr/bin/env python3
'''
Пример объектной организации кода
'''
from tkinter import *
from tkinter import colorchooser
class App(Frame):
    '''Base framed application class'''

    def __init__(self, master=None, Title="Application"):
        Frame.__init__(self, master)
        # Let the single top-level cell grow with the window.
        self.master.rowconfigure(0, weight=1)
        self.master.columnconfigure(0, weight=1)
        self.master.title(Title)
        self.grid(sticky=N+E+S+W)
        self.create()
        self.adjust()

    def create(self):
        '''Create all the widgets'''
        self.bQuit = Button(self, text='Quit', command=self.quit)
        self.bQuit.grid()

    def adjust(self):
        '''Adjust grid size/properties'''
        # TODO Smart detecting resizeable/still cells
        cols, rows = self.size()
        for col in range(cols):
            self.columnconfigure(col, weight=12)
        for row in range(rows):
            self.rowconfigure(row, weight=12)
class Paint(Canvas):
    '''Canvas with simple line drawing driven by mouse drags'''

    def __init__(self, master=None, *ap, foreground="black", **an):
        self.foreground = StringVar()
        self.foreground.set(foreground)
        Canvas.__init__(self, master, *ap, **an)
        self.bind("<Button-1>", self.mousedown)
        self.bind("<B1-Motion>", self.mousemove)
        self.bind("<ButtonRelease-1>", self.mouseup)

    def mousedown(self, event):
        '''Store mousedown coords'''
        self.x0, self.y0 = event.x, event.y
        self.cursor = None

    def mousemove(self, event):
        '''Redraw the rubber-band line while the mouse is dragged'''
        if self.cursor:
            self.delete(self.cursor)
        self.cursor = self.create_line(
            (self.x0, self.y0, event.x, event.y), fill=self.foreground.get())

    def mouseup(self, event):
        '''Dragging is done; keep the final line'''
        self.cursor = None
class MyApp(App):
    '''Demo application: a paint canvas with a color chooser'''

    def create(self):
        self.Canvas = Paint(self, foreground="midnightblue")
        self.Canvas.grid(row=0, column=0, rowspan=3, sticky=N+E+S+W)
        self.AskColor = Button(self, text="Color", command=self.askcolor)
        self.AskColor.grid(row=0, column=1, sticky=N+W)
        # Shows the currently selected color name/value.
        self.ShowColor = Label(self, textvariable=self.Canvas.foreground)
        self.ShowColor.grid(row=1, column=1, sticky=N+W+E)
        self.Quit = Button(self, text="Quit", command=self.quit)
        self.Quit.grid(row=2, column=1, sticky=N+W)

    def askcolor(self):
        '''Ask the user for a color and use it as the new foreground'''
        self.Canvas.foreground.set(colorchooser.askcolor()[1])
# Run the demo, then dump every drawn line's coordinates and fill color.
app = MyApp(Title="Canvas Example")
app.mainloop()
for line_id in app.Canvas.find_all():
    print(*app.Canvas.coords(line_id), app.Canvas.itemcget(line_id, "fill"))
|
#!/usr/bin/env python
import sys
def fun(gtf):
    '''Parse a GTF/GFF file into a dict keyed by transcript id.

    Each value is [strand, transcript_id, target_name, cds_starts,
    cds_ends, cds_block_sizes, (mRNA_start, mRNA_end)]. Header lines
    (starting with '#') and blank lines are skipped.
    '''
    D = {}
    # Fix: initialize the current transcript up front so a CDS line that
    # appears before any gene/mRNA line cannot raise NameError.
    transcript = ''
    with open(gtf) as handle:
        for l in handle:
            if l.startswith('#') or l.startswith('\n'):
                continue
            tmplist = l.split('\t')
            qStart = int(tmplist[3])
            qEnd = int(tmplist[4])
            items = tmplist[8].split(';')
            tName = tmplist[0]
            strand = tmplist[6]
            blockSize = qEnd - qStart + 1
            if tmplist[2] == 'gene':
                # A new gene resets the current transcript.
                transcript = ''
            elif 'mRNA' == tmplist[2]:
                # Transcript id comes from the first attribute, e.g. "ID=t1".
                transcript = items[0][3:]
                D[transcript] = [strand, transcript, tName, [], [], [], (qStart, qEnd)]
            elif 'CDS' == tmplist[2] and transcript:
                D[transcript][3].append(qStart)
                D[transcript][4].append(qEnd)
                D[transcript][5].append(blockSize)
    return D
def write_lines(D, psl):
out = open(psl, 'w')
for t in D:
line = '0\t0\t0\t0\t0\t0\t0\t0\t%s\t%s\t%s\t0\t%s\t%s\t0\t%s\t%s\t%s\t%s\t%s\t%s\n' % (D[t][0],
D[t][1],
D[t][-1][1] + 1 - D[t][-1][0],
D[t][-1][1] - D[t][-1][0],
D[t][2],
D[t][-1][0] -1,
D[t][-1][1],
len(D[t][5]),
''.join([str(x)+',' for x in D[t][5]]),
'0,' * len(D[t][5]),
''.join([str(x-1)+',' for x in D[t][3]]))
out.write(line)
out.close()
if __name__ == '__main__':
    # Fix: the original used the Python-2-only `print len(D)` statement;
    # the parenthesized call works under both Python 2 and 3.
    D = fun(sys.argv[1])
    print(len(D))
    write_lines(D, sys.argv[2])
|
# -*- coding: utf-8 -*-
"""Core Auxein mutations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABC, abstractmethod
from auxein.population.genotype import Genotype
import numpy as np
class Mutation(ABC):
    """Base class for genotype mutations.

    extend_probability is the chance that a mutation also appends one new
    gene (and a randomly drawn mask entry) to the genotype.
    """

    def __init__(self, extend_probability: float = 0.0):
        assert 0 <= extend_probability <= 1, 'extend_probability must be within [0, 1]'
        self.extend_probability = extend_probability

    def _extend(self, genotype: Genotype, new_gene: float) -> Genotype:
        """With probability extend_probability, return a genotype grown by
        *new_gene*; otherwise return *genotype* unchanged."""
        if np.random.uniform(0, 1) > self.extend_probability:
            return genotype
        longer_dna = np.append(genotype.dna, new_gene)
        longer_mask = np.append(genotype.mask, np.random.normal(0, 1))
        return Genotype(longer_dna, longer_mask)

    @abstractmethod
    def mutate(self, genotype: Genotype) -> Genotype:
        pass
class Uniform(Mutation):
    """Mutation that resets one uniformly-chosen gene to a uniform random
    value in [lower_bound, upper_bound)."""

    def __init__(self, lower_bound: float, upper_bound: float, extend_probability: float = 0.0) -> None:
        super().__init__(extend_probability=extend_probability)
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound

    def mutate(self, genotype: Genotype) -> Genotype:
        gene_index = np.random.randint(
            0,
            genotype.dimension
        )
        # Fix: copy before writing. The original assigned through
        # genotype.dna, mutating the input genotype in place when .dna
        # exposes the underlying array (other mutations here build new
        # arrays instead).
        dna = np.array(genotype.dna)
        dna[gene_index] = np.random.uniform(self.lower_bound, self.upper_bound)
        new_gene = np.random.uniform(self.lower_bound, self.upper_bound)
        return super()._extend(Genotype(dna, genotype.mask), new_gene)
class FixedVariance(Mutation):
    """Gaussian mutation: perturb every gene with N(0, sigma) noise."""

    def __init__(self, sigma: float, extend_probability: float = 0.0) -> None:
        super().__init__(extend_probability=extend_probability)
        self.sigma = sigma

    def mutate(self, genotype: Genotype) -> Genotype:
        perturb = np.vectorize(lambda gene: gene + np.random.normal(0, self.sigma))
        # new_gene is drawn before the per-gene noise, matching the
        # original evaluation order (relevant under a fixed RNG seed).
        new_gene = np.random.normal(0, self.sigma)
        mutated = Genotype(perturb(genotype.dna), genotype.mask)
        return super()._extend(mutated, new_gene)
class SelfAdaptiveSingleStep(Mutation):
    """Log-normal self-adaptive mutation with a single step size.

    The whole mask is rescaled by exp(N(0, tau)) and the dna is moved by
    mask-scaled standard-normal noise.
    """

    def __init__(self, tau: float, extend_probability: float = 0.0) -> None:
        super().__init__(extend_probability=extend_probability)
        self.tau = tau

    def mutate(self, genotype: Genotype) -> Genotype:
        multiplier = np.exp(np.random.normal(0, self.tau))
        updated_mask = np.vectorize(lambda m: m * multiplier)(genotype.mask)
        noise = np.random.normal(0, 1, genotype.dimension)
        dna = genotype.dna + (updated_mask * noise)
        new_gene = np.random.normal(0, self.tau)
        return super()._extend(Genotype(dna, updated_mask), new_gene)
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from dataclasses import dataclass
from typing import Any
from pants.bsp.spec.base import Uri
from pants.bsp.utils import freeze_json
@dataclass(frozen=True)
class BuildClientCapabilities:
    # The languages that this client supports.
    # The ID strings for each language is defined in the LSP.
    # The server must never respond with build targets for other
    # languages than those that appear in this list.
    language_ids: tuple[str, ...]

    @classmethod
    def from_json_dict(cls, d):
        """Decode from a BSP JSON object; a missing languageIds key yields
        an empty tuple."""
        raw_ids = d.get("languageIds", [])
        return cls(language_ids=tuple(raw_ids))

    def to_json_dict(self):
        return {"languageIds": self.language_ids}
@dataclass(frozen=True)
class InitializeBuildParams:
    # Name of the client
    display_name: str
    # The version of the client
    version: str
    # The BSP version that the client speaks
    bsp_version: str
    # The rootUri of the workspace
    root_uri: Uri
    # The capabilities of the client
    capabilities: BuildClientCapabilities
    # Additional metadata about the client
    data: Any | None

    @classmethod
    def from_json_dict(cls, d):
        return cls(
            display_name=d["displayName"],
            version=d["version"],
            bsp_version=d["bspVersion"],
            root_uri=d["rootUri"],
            capabilities=BuildClientCapabilities.from_json_dict(d["capabilities"]),
            data=freeze_json(d.get("data")),
        )

    def to_json_dict(self):
        payload = {
            "displayName": self.display_name,
            "version": self.version,
            "bspVersion": self.bsp_version,
            "rootUri": self.root_uri,
            "capabilities": self.capabilities.to_json_dict(),
        }
        # "data" is optional and omitted entirely when absent.
        if self.data is not None:
            payload["data"] = self.data
        return payload
@dataclass(frozen=True)
class CompileProvider:
    """Languages for which the server supports buildTarget/compile."""

    language_ids: tuple[str, ...]

    @classmethod
    def from_json_dict(cls, d):
        raw_ids = d.get("languageIds", [])
        return cls(language_ids=tuple(raw_ids))

    def to_json_dict(self):
        return {"languageIds": self.language_ids}
@dataclass(frozen=True)
class RunProvider:
    """Languages for which the server supports buildTarget/run."""

    language_ids: tuple[str, ...]

    @classmethod
    def from_json_dict(cls, d):
        raw_ids = d.get("languageIds", [])
        return cls(language_ids=tuple(raw_ids))

    def to_json_dict(self):
        return {"languageIds": self.language_ids}
@dataclass(frozen=True)
class DebugProvider:
    """Languages for which the server supports debugSession/start."""

    language_ids: tuple[str, ...]

    @classmethod
    def from_json_dict(cls, d):
        raw_ids = d.get("languageIds", [])
        return cls(language_ids=tuple(raw_ids))

    def to_json_dict(self):
        return {"languageIds": self.language_ids}
@dataclass(frozen=True)
class TestProvider:
    """Languages for which the server supports buildTarget/test."""

    language_ids: tuple[str, ...]

    @classmethod
    def from_json_dict(cls, d):
        raw_ids = d.get("languageIds", [])
        return cls(language_ids=tuple(raw_ids))

    def to_json_dict(self):
        return {"languageIds": self.language_ids}
@dataclass(frozen=True)
class BuildServerCapabilities:
    # The languages the server supports compilation via method buildTarget/compile.
    compile_provider: CompileProvider | None
    # The languages the server supports test execution via method buildTarget/test
    test_provider: TestProvider | None
    # The languages the server supports run via method buildTarget/run
    run_provider: RunProvider | None
    # The languages the server supports debugging via method debugSession/start
    debug_provider: DebugProvider | None
    # The server can provide a list of targets that contain a
    # single text document via the method buildTarget/inverseSources
    inverse_sources_provider: bool | None
    # The server provides sources for library dependencies
    # via method buildTarget/dependencySources
    dependency_sources_provider: bool | None
    # The server can provide a list of dependency modules (libraries with meta information)
    # via method buildTarget/dependencyModules
    dependency_modules_provider: bool | None
    # The server provides all the resource dependencies
    # via method buildTarget/resources
    resources_provider: bool | None
    # Reloading the build state through workspace/reload is supported
    can_reload: bool | None
    # The server sends notifications to the client on build
    # target change events via buildTarget/didChange
    build_target_changed_provider: bool | None

    @classmethod
    def from_json_dict(cls, d):
        # Provider sub-objects are only decoded when present.
        compile_provider = None
        if "compileProvider" in d:
            compile_provider = CompileProvider.from_json_dict(d["compileProvider"])
        test_provider = None
        if "testProvider" in d:
            test_provider = TestProvider.from_json_dict(d["testProvider"])
        run_provider = None
        if "runProvider" in d:
            run_provider = RunProvider.from_json_dict(d["runProvider"])
        debug_provider = None
        if "debugProvider" in d:
            debug_provider = DebugProvider.from_json_dict(d["debugProvider"])
        return cls(
            compile_provider=compile_provider,
            test_provider=test_provider,
            run_provider=run_provider,
            debug_provider=debug_provider,
            inverse_sources_provider=d.get("inverseSourcesProvider"),
            dependency_sources_provider=d.get("dependencySourcesProvider"),
            dependency_modules_provider=d.get("dependencyModulesProvider"),
            resources_provider=d.get("resourcesProvider"),
            can_reload=d.get("canReload"),
            build_target_changed_provider=d.get("buildTargetChangedProvider"),
        )

    def to_json_dict(self):
        # Every field is optional; None values are omitted from the JSON.
        result = {}
        for key, provider in (
            ("compileProvider", self.compile_provider),
            ("testProvider", self.test_provider),
            ("runProvider", self.run_provider),
            ("debugProvider", self.debug_provider),
        ):
            if provider is not None:
                result[key] = provider.to_json_dict()
        for key, flag in (
            ("inverseSourcesProvider", self.inverse_sources_provider),
            ("dependencySourcesProvider", self.dependency_sources_provider),
            ("dependencyModulesProvider", self.dependency_modules_provider),
            ("resourcesProvider", self.resources_provider),
            ("canReload", self.can_reload),
            ("buildTargetChangedProvider", self.build_target_changed_provider),
        ):
            if flag is not None:
                result[key] = flag
        return result
@dataclass(frozen=True)
class InitializeBuildResult:
    # Name of the server
    display_name: str
    # The version of the server
    version: str
    # The BSP version that the server speaks
    bsp_version: str
    # The capabilities of the build server
    capabilities: BuildServerCapabilities
    # Additional metadata about the server
    data: Any | None

    @classmethod
    def from_json_dict(cls, d):
        return cls(
            display_name=d["displayName"],
            version=d["version"],
            bsp_version=d["bspVersion"],
            capabilities=BuildServerCapabilities.from_json_dict(d["capabilities"]),
            data=d.get("data"),
        )

    def to_json_dict(self):
        payload = {
            "displayName": self.display_name,
            "version": self.version,
            "bspVersion": self.bsp_version,
            "capabilities": self.capabilities.to_json_dict(),
        }
        if self.data is not None:
            # TODO: Figure out whether to encode/decode data in a generic manner.
            payload["data"] = self.data
        return payload
|
import os
import time
from datetime import datetime as dt, timedelta
from dateutil.parser import parse as date_parse
# NOTE(review): changes the working directory at import time so RScheduler can
# be imported from the parent directory — this side effect also shifts any
# relative paths used later in the test run.
os.chdir('..')
from RScheduler import TaskScheduler
def job(x, y):
    """Placeholder task body for the scheduler tests: just prints its args."""
    print(x, y)
def test_registry():
    """Registering a recurring and a one-shot job leaves both in the registry."""
    scheduler = TaskScheduler()
    scheduler.every("businessday").at("10:00").do(job, x="hello", y="world")
    scheduler.on('2019-05-16').do(job, x="hello", y="world")
    assert len(scheduler.jobs) == 2
def test_regular():
    """A daily 23:59 job is first scheduled for 23:59 of the current day."""
    expected = dt.now().replace(hour=23, minute=59, second=0, microsecond=0)
    scheduler = TaskScheduler()
    scheduler.every("day").at("23:59").do(job, x="hello", y="world")
    assert scheduler.jobs[0].next_timestamp == dt.timestamp(expected)
def test_onetime():
    """check() drops a one-shot job dated in the past and keeps a future one."""
    yesterday = (dt.now() - timedelta(days=1)).replace(hour=23, minute=59, second=0, microsecond=0)
    tomorrow = (dt.now() + timedelta(days=1)).replace(hour=23, minute=59, second=0, microsecond=0)
    scheduler = TaskScheduler()
    scheduler.on(yesterday.strftime("%Y-%m-%d")).at("23:59").do(job, x="hello", y="world")
    scheduler.on(tomorrow.strftime("%Y-%m-%d")).at("23:59").do(job, x="hello", y="world")
    for scheduled in scheduler.jobs:
        # The expired job's timestamp is zeroed; the future one targets tomorrow.
        assert scheduled.next_timestamp == dt.timestamp(tomorrow) or scheduled.next_timestamp == 0
    assert len(scheduler.jobs) == 2
    scheduler.check()
    assert len(scheduler.jobs) == 1
def test_repeat():
    """An interval job advances by `interval` seconds after each check()."""
    start = time.time()
    interval = 2
    scheduler = TaskScheduler()
    scheduler.every(interval).do(job, x="hello", y="world")
    assert abs(scheduler.jobs[0].next_timestamp - (start + interval)) < 1
    time.sleep(interval)
    scheduler.check()
    assert abs(scheduler.jobs[0].next_timestamp - (start + 2 * interval)) < 0.1
|
# Generated by Django 2.0.9 on 2019-03-08 03:14
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 2.0.9) migration widening Device.district to the
    full list of region/district choices, defaulting to 'OTH' (Other).

    NOTE: edit the choices on the model, not here — regenerate instead of
    hand-editing this file.
    """

    dependencies = [
        ('tracking', '0002_auto_20190111_1434'),
    ]

    operations = [
        migrations.AlterField(
            model_name='device',
            name='district',
            field=models.CharField(choices=[('SWAN', 'Swan Region'), ('PHD', 'Perth Hills'), ('SCD', 'Swan Coastal'), ('SWR', 'South West Region'), ('BWD', 'Blackwood'), ('WTN', 'Wellington'), ('WR', 'Warren Region'), ('DON', 'Donnelly'), ('FRK', 'Frankland'), ('SCR', 'South Coast Region'), ('ALB', 'Albany'), ('ESP', 'Esperance'), ('KIMB', 'Kimberley Region'), ('EKD', 'East Kimberley'), ('WKD', 'West Kimberley'), ('PIL', 'Pilbara Region'), ('EXM', 'Exmouth'), ('GLD', 'Goldfields Region'), ('MWR', 'Midwest Region'), ('GER', 'Geraldton'), ('KLB', 'Kalbarri'), ('MOR', 'Moora'), ('SHB', 'Shark Bay'), ('WBR', 'Wheatbelt Region'), ('CWB', 'Central Wheatbelt'), ('SWB', 'Southern Wheatbelt'), ('AV', 'Aviation'), ('OTH', 'Other')], default='OTH', max_length=32, verbose_name='Region/District'),
        ),
    ]
|
# Copyright 2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import random
from f5.bigip.tm.asm.policies.whitelist_ips import Whitelist_Ip
from f5.sdk_exception import AttemptedMutationOfReadOnly
from requests.exceptions import HTTPError
class TestWhitelistIps(object):
    """Functional tests for a policy's ASM whitelist-IPs sub-collection."""

    def test_create_req_arg(self, policy):
        """Creating with only ipAddress defaults the mask to /32."""
        address = '.'.join(str(random.randint(1, 254)) for _ in range(4))
        created = policy.whitelist_ips_s.whitelist_ip.create(ipAddress=address)
        assert created.kind == 'tm:asm:policies:whitelist-ips:whitelist-ipstate'
        assert created.ipAddress == address
        assert created.ipMask == '255.255.255.255'
        created.delete()

    def test_create_optional_args(self, policy):
        """An explicit ipMask is honoured on create."""
        address = '.'.join(str(random.randint(1, 254)) for _ in range(3)) + '.0'
        created = policy.whitelist_ips_s.whitelist_ip.create(
            ipAddress=address, ipMask='255.255.255.0')
        assert created.kind == 'tm:asm:policies:whitelist-ips:whitelist-ipstate'
        assert created.ipAddress == address
        assert created.ipMask == '255.255.255.0'
        created.delete()

    def test_refresh(self, policy):
        """refresh() pulls server-side changes made through another handle."""
        address = '.'.join(str(random.randint(1, 254)) for _ in range(4))
        first = policy.whitelist_ips_s.whitelist_ip.create(ipAddress=address)
        second = policy.whitelist_ips_s.whitelist_ip.load(id=first.id)
        assert first.kind == second.kind
        assert first.ipAddress == second.ipAddress
        assert first.description == second.description
        second.modify(description='TESTFAKE')
        # The first handle is stale until explicitly refreshed.
        assert first.description == ''
        assert second.description == 'TESTFAKE'
        first.refresh()
        assert first.description == 'TESTFAKE'
        first.delete()

    def test_modify_read_only_raises(self, policy):
        """ipMask is read-only after creation."""
        address = '.'.join(str(random.randint(1, 254)) for _ in range(3)) + '.0'
        created = policy.whitelist_ips_s.whitelist_ip.create(
            ipAddress=address, ipMask='255.255.255.0')
        with pytest.raises(AttemptedMutationOfReadOnly):
            created.modify(ipMask='255.255.0.0')

    def test_delete(self, policy):
        """Deleted entries can no longer be loaded (HTTP 404)."""
        address = '.'.join(str(random.randint(1, 254)) for _ in range(4))
        created = policy.whitelist_ips_s.whitelist_ip.create(ipAddress=address)
        entry_id = str(created.id)
        created.delete()
        with pytest.raises(HTTPError) as err:
            policy.whitelist_ips_s.whitelist_ip.load(id=entry_id)
        assert err.value.response.status_code == 404

    def test_load_no_object(self, policy):
        """Loading an unknown id yields HTTP 404."""
        with pytest.raises(HTTPError) as err:
            policy.whitelist_ips_s.whitelist_ip.load(id='Lx3553-321')
        assert err.value.response.status_code == 404

    def test_load(self, policy):
        """load() returns an equivalent, up-to-date view of an entry."""
        address = '.'.join(str(random.randint(1, 254)) for _ in range(4))
        created = policy.whitelist_ips_s.whitelist_ip.create(ipAddress=address)
        assert created.kind == 'tm:asm:policies:whitelist-ips:whitelist-ipstate'
        assert created.ipAddress == address
        assert created.ipMask == '255.255.255.255'
        assert created.description == ''
        created.modify(description='TESTFAKE')
        assert created.description == 'TESTFAKE'
        loaded = policy.whitelist_ips_s.whitelist_ip.load(id=created.id)
        assert created.kind == loaded.kind
        assert created.ipAddress == loaded.ipAddress
        assert created.selfLink == loaded.selfLink
        assert created.description == loaded.description
        created.delete()

    def test_whitelistips_subcollection(self, policy):
        """get_collection() returns a list of Whitelist_Ip resources."""
        address = '.'.join(str(random.randint(1, 254)) for _ in range(4))
        created = policy.whitelist_ips_s.whitelist_ip.create(ipAddress=address)
        assert created.kind == 'tm:asm:policies:whitelist-ips:whitelist-ipstate'
        assert created.ipAddress == address
        assert created.ipMask == '255.255.255.255'
        collection = policy.whitelist_ips_s.get_collection()
        assert isinstance(collection, list)
        assert len(collection)
        assert isinstance(collection[0], Whitelist_Ip)
        created.delete()
|
#! /usr/bin/env python3
"""pretty-print json data in a given file
saves a pretty-printed version instead of overwriting the original
"""
import sys
import os
import json
INDENT = 2  # spaces per indentation level in the pretty-printed output


def prettify(filename):
    """Pretty-print the JSON document stored in *filename*.

    input: filename
    output: None
    side-effect: writes a sibling file named "<stem>_pretty<ext>" containing
    the same JSON data indented by INDENT spaces. The original file is never
    overwritten.
    """
    outname = '_pretty'.join(os.path.splitext(filename))
    # Read and write as UTF-8 explicitly so the result does not depend on the
    # platform's default locale encoding.
    with open(filename, encoding="utf-8") as _in:
        data = json.load(_in)
    with open(outname, "w", encoding="utf-8") as _out:
        # ensure_ascii=False keeps non-ASCII characters readable instead of
        # escaping them to \uXXXX sequences.
        json.dump(data, _out, indent=INDENT, ensure_ascii=False)
if __name__ == "__main__":
    # Command-line entry point: expects exactly one argument, the JSON file.
    if len(sys.argv) == 2:
        prettify(sys.argv[1])
    else:
        print("usage: python(3) jsonp.py <filename>")
|
#MenuTitle: Move All Paths Position to Left Bottom
# -*- coding: utf-8 -*-
__doc__="""
左下の座標が(0,ディセンダー値)になるようにパスを移動
"""
Glyphs.clearLog()
font = Glyphs.font
layer = font.selectedLayers[0]
desc = layer.descender
nodeMinX = layer.bounds.origin.x
nodeMinY = layer.bounds.origin.y
#x,y座標を移動する
for thisPath in layer.paths:
for thisNode in thisPath.nodes:
thisNode.x += 0 - nodeMinX
thisNode.y += desc - nodeMinY
|
#!/usr/bin/env python3
import datetime
import json
import os
import re
import fnmatch
from PIL import Image
import numpy as np
import sys
sys.path.append('../')
from pycococreatortools import pycococreatortools
# Dataset root; the image/label folders and the output JSON all live under it.
ROOT_DIR = '/home/ccchang/disk2_1tb_ssd/robot_dataset/panel_exp_fixed/mycoco/'
###Synthetic images
IMAGE_DIR = os.path.join(ROOT_DIR, "coco_train_200k/")
ANNOTATION_DIR = os.path.join(ROOT_DIR, "coco_label_200k/")
###Real images
# IMAGE_DIR = os.path.join(ROOT_DIR, "coco_real_train/")
# ANNOTATION_DIR = os.path.join(ROOT_DIR, "coco_real_label/")
# COCO "info" header embedded verbatim in the output file.
INFO = {
    "description": "Example Dataset",
    "url": "https://github.com/waspinator/pycococreator",
    "version": "0.1.0",
    "year": 2018,
    "contributor": "waspinator",
    "date_created": datetime.datetime.utcnow().isoformat(' ')
}
# COCO "licenses" table (single CC BY-NC-SA entry).
LICENSES = [
    {
        "id": 1,
        "name": "Attribution-NonCommercial-ShareAlike License",
        "url": "http://creativecommons.org/licenses/by-nc-sa/2.0/"
    }
]
# Category table; only 'panel' is active — the commented entries show how to
# add further classes.
CATEGORIES = [
    {
        'id': 1,
        'name': 'panel',
        'supercategory': 'shape',
    },
    # {
    #     'id': 2,
    #     'name': 'circle',
    #     'supercategory': 'shape',
    # },
    # {
    #     'id': 3,
    #     'name': 'triangle',
    #     'supercategory': 'shape',
    # },
]
def filter_for_jpeg(root, files):
    """Return full paths of the entries in *files* that are image files
    (.jpeg/.jpg/.png), joined onto *root*."""
    patterns = ['*.jpeg', '*.jpg', '*.png']
    matcher = re.compile(r'|'.join(fnmatch.translate(p) for p in patterns))
    candidates = [os.path.join(root, name) for name in files]
    return [path for path in candidates if matcher.match(path)]
def filter_for_annotations(root, files, image_filename):
    """Return the annotation files under *root* that belong to *image_filename*.

    Annotations are PNG files whose basename (without extension) is the
    image's basename, optionally followed by an underscore-separated suffix
    (e.g. image "1.png" owns "1_panel_0.png").

    Bug fix: the previous prefix regex was built as ``basename + '.*'`` with
    the basename unescaped, so for image "1" the pattern "1.*" also matched
    annotations of images "12", "100", ... (and "." in a basename matched any
    character). The basename is now escaped and must be followed by '_' or
    end-of-string.
    """
    file_types = ['*.png']
    file_types = r'|'.join([fnmatch.translate(x) for x in file_types])
    basename_no_extension = os.path.splitext(os.path.basename(image_filename))[0]
    # Match the image basename exactly, then either an underscore-separated
    # suffix (category/instance part) or nothing at all.
    file_name_prefix = re.escape(basename_no_extension) + r'(_|$)'
    files = [os.path.join(root, f) for f in files]
    files = [f for f in files if re.match(file_types, f)]
    files = [f for f in files if re.match(file_name_prefix, os.path.splitext(os.path.basename(f))[0])]
    return files
def main_old():
    """Original (slow) COCO conversion: for every image, re-walk
    ANNOTATION_DIR to find its masks by filename prefix, then emit one COCO
    image record per image and one annotation record per mask.

    Kept for reference; ``main`` does the same job with a precomputed
    image-id -> annotation-files index instead of the nested directory walk.

    Side effect: writes <ROOT_DIR>/panel_coco.json.

    Fix: removed a leftover ``input("stop")`` debug prompt that blocked the
    conversion waiting for keyboard input on every annotation-directory walk,
    plus the accompanying debug prints.
    """
    coco_output = {
        "info": INFO,
        "licenses": LICENSES,
        "categories": CATEGORIES,
        "images": [],
        "annotations": []
    }
    image_id = 1
    segmentation_id = 1
    # filter for jpeg images
    for root, _, files in os.walk(IMAGE_DIR):
        image_files = filter_for_jpeg(root, files)
        # go through each image
        for image_filename in image_files:
            image = Image.open(image_filename)
            image_info = pycococreatortools.create_image_info(
                image_id, os.path.basename(image_filename), image.size)
            coco_output["images"].append(image_info)
            # filter for associated png annotations
            for root, _, files in os.walk(ANNOTATION_DIR):
                annotation_files = filter_for_annotations(root, files, image_filename)
                # go through each associated annotation
                for annotation_filename in annotation_files:
                    # The category is inferred from the category name embedded
                    # in the annotation filename.
                    class_id = [x['id'] for x in CATEGORIES if x['name'] in annotation_filename][0]
                    category_info = {'id': class_id, 'is_crowd': 'crowd' in image_filename}
                    # Binarize the mask: anything below pure white is background.
                    tmp_img = np.array(Image.open(annotation_filename))
                    tmp_img[tmp_img < 255] = 0
                    tmp_img = Image.fromarray(tmp_img)
                    binary_mask = np.array(tmp_img).astype(np.uint8)
                    annotation_info = pycococreatortools.create_annotation_info(
                        segmentation_id, image_id, category_info, binary_mask,
                        image.size, tolerance=2)
                    if annotation_info is not None:
                        coco_output["annotations"].append(annotation_info)
                        segmentation_id = segmentation_id + 1
            image_id = image_id + 1
    with open('{}/panel_coco.json'.format(ROOT_DIR), 'w') as output_json_file:
        json.dump(coco_output, output_json_file)
def main():
    """Convert the 200k synthetic panel dataset to COCO format.

    Builds an image-id -> annotation-files index in one pass over
    ANNOTATION_DIR (annotation names look like '<image-id>_<category>_<n>.png'
    — see the '_' split below), then emits one COCO image record per image
    and one annotation record per mask.

    Side effect: writes <ROOT_DIR>/panel_coco_200k.json.
    """
    coco_output = {
        "info": INFO,
        "licenses": LICENSES,
        "categories": CATEGORIES,
        "images": [],
        "annotations": []
    }
    image_id = 1
    segmentation_id = 1
    # NOTE(review): all_img_str is collected but never used afterwards.
    all_img_str = []
    all_ann_str = []
    ### img_ann_dict : {'0':['0_panel_0','0_panel_1'], '1':['1_panel_0','1_panel_1']...}
    img_ann_dict = {}
    for img_name in os.listdir(IMAGE_DIR):
        all_img_str.append(img_name)
    for ann_name in os.listdir(ANNOTATION_DIR):
        all_ann_str.append(ann_name)
    # Group annotation names by the image id that precedes the first '_'.
    for ann_name in all_ann_str:
        pos = ann_name.find('_')
        img_id = ann_name[:pos]
        if img_id in img_ann_dict.keys():
            img_ann_dict[img_id].append(ann_name)
        else:
            img_ann_dict[img_id] = [ann_name]
    # go through each image
    for img_id in img_ann_dict.keys():
        # Images are assumed to be named '<id>.png' — TODO confirm for real data.
        image_filename = IMAGE_DIR + img_id + ".png"
        image = Image.open(image_filename)
        image_info = pycococreatortools.create_image_info(image_id, os.path.basename(image_filename), image.size)
        coco_output["images"].append(image_info)
        ann_files = img_ann_dict[img_id]
        # go through each associated annotation
        for ann_str in ann_files:
            # if annotation_filename.find("32_panel_0")==-1:
            # if annotation_filename.find("32_panel_0")!=-1:
            #     continue
            annotation_filename = ANNOTATION_DIR + ann_str
            # print(annotation_filename)
            # Category id comes from the category name embedded in the filename.
            class_id = [x['id'] for x in CATEGORIES if x['name'] in annotation_filename][0]
            category_info = {'id': class_id, 'is_crowd': 'crowd' in image_filename}
            # Binarize the mask: anything below pure white becomes background.
            tmp_img = np.array(Image.open(annotation_filename))
            tmp_img[tmp_img<255] = 0
            tmp_img = Image.fromarray(tmp_img)
            # binary_mask = np.asarray(Image.open(annotation_filename).convert('1')).astype(np.uint8) #Image convert("1") -> grayscale
            # binary_mask = np.asarray(tmp_img.convert('1')).astype(np.uint8)
            binary_mask = np.array(tmp_img).astype(np.uint8)
            annotation_info = pycococreatortools.create_annotation_info(
                segmentation_id, image_id, category_info, binary_mask,
                image.size, tolerance=2)
            if annotation_info is not None:
                coco_output["annotations"].append(annotation_info)
                segmentation_id = segmentation_id + 1
        # Progress report every 1000 images.
        if image_id %1000 == 0:
            print("Finished num:",image_id)
        image_id = image_id + 1
    with open('{}/panel_coco_200k.json'.format(ROOT_DIR), 'w') as output_json_file:
        json.dump(coco_output, output_json_file)
# Script entry point: build the 200k-image COCO annotation file.
if __name__ == "__main__":
    main()
|
from hamilton import node
from hamilton.experimental.decorators import augment
|
import torch.nn.functional as F
from torch import nn
class CapsuleLoss(nn.Module):
    """Margin loss for capsule networks (Sabour et al., 2017, "Dynamic
    Routing Between Capsules"), without the reconstruction term:

        L_k = T_k * max(0, m+ - a_k)^2 + lam * (1 - T_k) * max(0, a_k - m-)^2

    summed over all classes and batch elements.

    The margins are now parameters (defaulting to the paper's values), so
    existing no-argument callers are unaffected.

    Args:
        pos_margin: m+, target activation floor for present classes (0.9).
        neg_margin: m-, activation ceiling for absent classes (0.1).
        neg_weight: lam, down-weighting of the absent-class term (0.5).
    """

    def __init__(self, pos_margin=0.9, neg_margin=0.1, neg_weight=0.5):
        super(CapsuleLoss, self).__init__()
        self.pos_margin = pos_margin
        self.neg_margin = neg_margin
        self.neg_weight = neg_weight

    def forward(self, classes, labels):
        """Compute the summed margin loss.

        Args:
            classes: per-class activations (same shape as `labels`).
            labels: one-hot / {0,1} target tensor.

        Returns:
            Scalar tensor: the loss summed over all elements.
        """
        # max(0, m+ - a)^2 — penalty when a present class is under-activated.
        left = F.relu(self.pos_margin - classes, inplace=True) ** 2
        # max(0, a - m-)^2 — penalty when an absent class is over-activated.
        right = F.relu(classes - self.neg_margin, inplace=True) ** 2
        margin_loss = labels * left + self.neg_weight * (1. - labels) * right
        return margin_loss.sum()
# Smoke check when run directly: construct the loss module and print its repr.
if __name__ == "__main__":
    digit_loss = CapsuleLoss()
    print(digit_loss)
|
import wikipedia
# Print the character count of a 15-sentence summary of the "Albert Einstein"
# article. NOTE(review): performs a live network request via the third-party
# `wikipedia` package; fails offline.
print(len(wikipedia.summary("Albert Einstein", sentences=15)))
import datetime
from collections import namedtuple
from . import consts as C
from .exceptions import FileFormatError, RDBValueError
from .util import read_byte, read_int, skip_bytes, unpack, unpack_pairs
from .intset import unpack_intset
from .ziplist import unpack_ziplist
from .zipmap import unpack_zipmap
from .lzf import unpack_lzf
__all__ = [
'parse_rdb_stream',
'RDBItem',
]
# One parsed database entry: db number, type name (e.g. 'string'), raw key
# bytes, decoded value, optional expiry datetime, and an info dict (currently
# just the low-level encoding name).
RDBItem = namedtuple('RDBItem', 'dbnum key_type key value expire info')
def parse_rdb_stream(f, skip_db=lambda dbnum: False,
                     skip_key_type=lambda dbnum, key_type: False,
                     skip_key=lambda dbnum, key_type, key: False):
    """Parses RDB file stream.

    Returns a generator that parses the input byte stream and yields one
    RDBItem per stored key (RDB versions 1-6 only).

    The three ``skip_*`` predicates let callers cheaply skip whole databases,
    all keys of a given type, or individual keys; skipped entries are seeked
    past without decoding their values.
    """
    dbnum = None
    _skip_db = False
    # read signature and version
    read_signature(f)
    version = read_version(f)
    if version > 6:
        raise NotImplementedError("Version {} is not supported"
                                  .format(version))
    while True:
        try:
            # read_byte raises TypeError on EOF (unpack of empty read).
            ctl_code = read_byte(f)
        except TypeError:
            # assume thats the end of a file
            # TODO: find a better way to handle this;
            break
        if ctl_code == C.SELECTDB:
            # Database switch: everything that follows belongs to `dbnum`.
            dbnum = read_length(f)
            _skip_db = skip_db(dbnum)
            continue
        if ctl_code == C.RDB_EOF:
            # TODO: maybe check crc
            break
        if dbnum is None:
            raise FileFormatError("Select DB code expected but none found")
        # An optional expiry precedes the value-type byte.
        expire, ctl_code = read_expire(ctl_code, f)
        if ctl_code not in C.VALUE_ENC_TYPES:
            raise RDBValueError("Got unknown data type {}"
                                .format(hex(ctl_code)))
        key_type = C.TYPE_NAMES[ctl_code]
        if _skip_db or skip_key_type(dbnum, key_type):
            # Seek past both key and value without decoding.
            read_skip_key(f)
            read_skip_value(ctl_code, f)
            continue
        key = read_key(f)
        if skip_key(dbnum, key_type, key):
            read_skip_value(ctl_code, f)
            continue
        value = read_value(ctl_code, f)
        info = {'encoding': C.ENC_NAMES[ctl_code]}
        # TODO: fill info dict (rdb version, encoding, etc)
        yield RDBItem(dbnum, key_type, key, value, expire, info)
def read_signature(f):
    """Consume the 5-byte magic string; fail on anything but an RDB stream."""
    if f.read(5) != C.MAGIC_STRING:
        raise FileFormatError("Invalid file format")
def read_version(f):
    """Read and validate the 4-byte ASCII RDB version number.

    Returns the version as an int (>= 1).

    Raises:
        FileFormatError: if the field is not a positive integer. The original
            ValueError is chained as the cause for easier debugging.
    """
    bversion = f.read(4)
    try:
        version = int(bversion)
    except ValueError as err:
        raise FileFormatError("Invalid RDB version number") from err
    if version < 1:
        raise FileFormatError("Invalid RDB version number")
    return version
def read_expire(ctl_code, f):
    """Decode an optional expiry field preceding a value entry.

    Returns (expiry datetime or None, the following ctl_code byte).
    """
    if ctl_code == C.EXPIRE_SEC:
        seconds = unpack('I', f.read(4))
        return datetime.datetime.utcfromtimestamp(seconds), read_byte(f)
    if ctl_code == C.EXPIRE_MSEC:
        millis = unpack('Q', f.read(8))
        return datetime.datetime.utcfromtimestamp(millis / 1000), read_byte(f)
    # Not an expiry marker: no expiry, ctl_code is already the value type.
    return None, ctl_code
def read_skip_expire(ctl_code, f):
    """Skip an optional expiry field without decoding it.

    Returns (None, following ctl_code), mirroring read_expire's shape.
    """
    widths = {C.EXPIRE_SEC: 4, C.EXPIRE_MSEC: 8}
    if ctl_code in widths:
        skip_bytes(f, widths[ctl_code])
        return None, read_byte(f)
    return None, ctl_code
def read_string(f):
    """Read one string value, decoding int- and LZF-compressed variants."""
    enc_type, length = read_string_length(f)
    if enc_type == C.STR_RAW:
        return f.read(length)
    if enc_type == C.STR_INTEGER:
        return read_int(f, length)
    if enc_type == C.STR_COMPRESSED:
        # Compressed strings carry (compressed, expected-uncompressed) lengths.
        clen, explen = length
        return unpack_lzf(f, clen, explen)
    raise RDBValueError("Got unknown string encoding type {}"
                        .format(hex(enc_type)))
def read_skip_string(f):
    """
    read string length and skip that number of bytes;
    Note: for compressed strings length is a tuple of
    compressed & uncompressed lengths
    """
    enc_type, length = read_string_length(f)
    if enc_type == C.STR_COMPRESSED:
        # Only the compressed payload is physically present in the stream.
        length, _expected = length
    skip_bytes(f, length)
# Keys are plain strings: reuse the string readers under key-specific names.
read_key = read_string
read_skip_key = read_skip_string
def read_value(ctl_code, f):
    """Decode one stored value of the given encoding into Python objects."""
    if ctl_code == C.VALUE_ENC_STRING:
        return read_string(f)
    if ctl_code == C.VALUE_ENC_LIST:
        return list(unpack_list(f))
    if ctl_code == C.VALUE_ENC_SET:
        # TODO: set values are ordered
        # returning list instead of set to keep order
        return list(unpack_set(f))
    if ctl_code == C.VALUE_ENC_SORTET_SET:
        return list(unpack_zset(f))
    if ctl_code == C.VALUE_ENC_HASH:
        return list(unpack_hash(f))
    if ctl_code == C.VALUE_ENC_ZIPMAP:
        return list(unpack_zipmap(read_string(f)))
    if ctl_code == C.VALUE_ENC_ZIPLIST:
        return list(unpack_ziplist(read_string(f)))
    if ctl_code == C.VALUE_ENC_INTSET:
        # returning list instead of set to keep order
        return list(unpack_intset(read_string(f)))
    if ctl_code in (C.VALUE_ENC_ZSET_IN_ZIPLIST, C.VALUE_ENC_HASH_IN_ZIPLIST):
        # Both are pair sequences stored inside a ziplist-encoded string.
        return list(unpack_pairs(unpack_ziplist(read_string(f))))
    raise RDBValueError("Got unknown data type {}".format(hex(ctl_code)))
def read_skip_value(ctl_code, f):
    """Seek past one value without decoding it (used for skipped keys)."""
    # These encodings are serialized as a single string payload.
    string_backed = (
        C.VALUE_ENC_ZIPMAP, C.VALUE_ENC_ZIPLIST, C.VALUE_ENC_INTSET,
        C.VALUE_ENC_ZSET_IN_ZIPLIST, C.VALUE_ENC_HASH_IN_ZIPLIST,
    )
    if ctl_code == C.VALUE_ENC_STRING:
        read_skip_string(f)
    elif ctl_code in (C.VALUE_ENC_LIST, C.VALUE_ENC_SET):
        for _ in range(read_length(f)):
            read_skip_string(f)
    elif ctl_code == C.VALUE_ENC_SORTET_SET:
        for _ in range(read_length(f)):
            read_skip_string(f)
            # Each member is followed by a length-prefixed score blob.
            skip_bytes(f, read_byte(f))
    elif ctl_code == C.VALUE_ENC_HASH:
        # One field string plus one value string per entry.
        for _ in range(read_length(f) * 2):
            read_skip_string(f)
    elif ctl_code in string_backed:
        read_skip_string(f)
def read_length(f):
    """Decode a length field (6-, 14-, 32-bit, or "special" integer forms)."""
    header = read_byte(f)
    enc_type = header >> 6
    low_bits = header & 0x3F
    if enc_type == C.LEN_ENC_6BIT:
        return low_bits
    if enc_type == C.LEN_ENC_14BIT:
        return (low_bits << 8) | read_byte(f)
    if enc_type == C.LEN_ENC_32BIT:
        return unpack('>I', f.read(4))
    # enc_type == LEN_ENC_SPECIAL: the low bits select an integer width.
    # (in docs these bytes read as signed integers)
    if low_bits == C.LEN_ENC_SPECIAL_8BIT:
        return read_byte(f)
    if low_bits == C.LEN_ENC_SPECIAL_16BIT:
        return unpack('H', f.read(2))
    if low_bits == C.LEN_ENC_SPECIAL_32BIT:
        return unpack('I', f.read(4))
    raise RDBValueError("Got unknown length encoding type {}"
                        .format(hex(header)))
def read_string_length(f):
    """
    parses string length and returns
    string encoding type and string length;
    Note: if string is compressed string length is returned as
    a tuple consisting of compressed length and
    expected uncompressed length
    """
    header = read_byte(f)
    enc_type = header >> 6
    low_bits = header & 0x3F
    # Plain length prefixes: raw string bytes follow.
    if enc_type == C.LEN_ENC_6BIT:
        return C.STR_RAW, low_bits
    if enc_type == C.LEN_ENC_14BIT:
        return C.STR_RAW, ((low_bits << 8) | read_byte(f))
    if enc_type == C.LEN_ENC_32BIT:
        return C.STR_RAW, unpack('>I', f.read(4))
    # "Special" encodings: a string-encoded integer stored in 1/2/4 bytes ...
    int_widths = {
        C.LEN_ENC_SPECIAL_8BIT: 1,
        C.LEN_ENC_SPECIAL_16BIT: 2,
        C.LEN_ENC_SPECIAL_32BIT: 4,
    }
    if low_bits in int_widths:
        return C.STR_INTEGER, int_widths[low_bits]
    # ... or an LZF-compressed blob with (compressed, expected) lengths.
    if low_bits == C.LEN_ENC_SPECIAL_LZF:
        clen = read_length(f)
        explen = read_length(f)
        return C.STR_COMPRESSED, (clen, explen)
    raise RDBValueError("Got unknown length encoding type {}"
                        .format(hex(header)))
def unpack_list(f):
    """Yield each element of a plain (non-ziplist) list value."""
    for _ in range(read_length(f)):
        yield read_string(f)


# Plain sets share the same wire format as lists.
unpack_set = unpack_list
def unpack_zset(f):
    """Yield (member, raw_score) pairs of a sorted-set value.

    Scores are returned as raw bytes; conversion (to float/int/Decimal/
    whatever) is left to the caller.
    """
    for _ in range(read_length(f)):
        member = read_string(f)
        score_len = read_byte(f)
        yield member, f.read(score_len)
def unpack_hash(f):
    """Yield (field, value) pairs of a plain (non-zipmap) hash value."""
    for _ in range(read_length(f)):
        # Field first, then value — evaluation order matters for the stream.
        yield read_string(f), read_string(f)
|
from .assay import AssayBuilder
from .glycoassay import GlycoAssayBuilder
|
from .bbox_nms import multiclass_nms
from .bbox_nms_reid import multiclass_nms_reid
from .merge_augs import (merge_aug_bboxes, merge_aug_masks,
merge_aug_proposals, merge_aug_scores)
__all__ = [
'multiclass_nms', 'multiclass_nms_reid', 'merge_aug_proposals', 'merge_aug_bboxes',
'merge_aug_scores', 'merge_aug_masks'
]
|
class Route:
    '''
    route decorator
    '''

    def __init__(self, app):
        # Keep a reference to the framework instance so rules can be
        # registered on it later.
        self.app = app

    def __call__(self, url, **options):
        '''
        Return a decorator registering the wrapped handler under *url*.
        '''
        # Default to GET-only when the caller did not specify methods.
        options.setdefault('methods', ['GET'])

        def decorator(f):
            # Register the handler through the app's add_url_rule hook and
            # hand the function back unchanged.
            self.app.add_url_rule(url, f, 'route', **options)
            return f

        return decorator
|
from collections import defaultdict
from util import UNDEF_ADDR, CFuncGraph, GraphBuilder, hexrays_vars, get_expr_name
import idaapi
import ida_hexrays
import json
import jsonlines
import os
import re
import subprocess
import sys
try:
from CStringIO import StringIO ## for Python 2
except ImportError:
from io import StringIO ## for Python 3
# Dictionary mapping variable ids to (orig, orig) pairs
varnames = dict()
# Reverse map: sentinel name -> original variable name.
oldvarnames = dict()
# Monotonically increasing id used to build unique sentinel names.
var_id = 0
# Matches names already rewritten to the '@@VAR_<id>' sentinel form.
sentinel_vars = re.compile('@@VAR_[0-9]+')
# IDA action identifier for the popup menu entry.
actname = "predict:varnames"
# Repository root (two levels above this script); model + runner live there.
dire_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..'))
RUN_ONE = os.path.join(dire_dir, "run_one.py")
MODEL = os.path.join(dire_dir, 'data', 'saved_models', 'model.hybrid.bin')
# Rename variables to sentinel representation
class RenamedGraphBuilder(GraphBuilder):
    """Graph builder that renames every local variable to the sentinel form
    '@@VAR_<id>@@<orig>@@<orig>', recording the old<->new mapping in the
    module-level `varnames` / `oldvarnames` dicts."""

    def __init__(self, cg, func, vuu):
        # func: the cfunc being rewritten; vuu: the pseudocode view (vdui)
        # used so renames stay in sync with the UI.
        self.func = func
        self.vuu = vuu
        super(RenamedGraphBuilder, self).__init__(cg)

    def visit_expr(self, e):
        # Visitor callback for every expression; only variable refs get work.
        global var_id
        if e.op is ida_hexrays.cot_var:
            # Save original name of variable
            original_name = get_expr_name(e)
            if not sentinel_vars.match(original_name):
                # Rename variables to @@VAR_[id]@@[orig name]@@[orig name]
                new_name = '@@VAR_' + str(var_id) + '@@' + original_name + '@@' + original_name
                self.vuu.rename_lvar(self.vuu.cfunc.get_lvars()[e.v.idx],
                                     str(new_name),
                                     True)
                # This is needed so that the NN graph sees the new
                # name without visiting the entire expression again.
                self.func.get_lvars()[e.v.idx].name = str(new_name)
                varnames[original_name] = new_name
                oldvarnames[new_name] = original_name
                var_id += 1
        return self.process(e)
# Rename variables to predicted names
class FinalRename(ida_hexrays.ctree_visitor_t):
    """ctree visitor that replaces sentinel variable names with the
    model-predicted names, updating both the pseudocode view and the lvar
    array."""

    def __init__(self, renamings, func, vuu):
        # renamings: sentinel-name -> predicted-name mapping.
        super(FinalRename, self).__init__(0)
        self.renamings = renamings
        self.func = func
        self.vuu = vuu

    def visit_expr(self, e):
        # Only variable expressions need renaming.
        if e.op is ida_hexrays.cot_var:
            original_name = get_expr_name(e)
            if original_name in self.renamings:
                new_name = self.renamings[original_name]
                if oldvarnames[original_name] != new_name:
                    print("Renaming %s to %s"%(oldvarnames[original_name],new_name))
                    # This one refreshes the pseudo-code window
                    self.vuu.rename_lvar(self.vuu.cfunc.get_lvars()[e.v.idx],
                                         str(new_name),
                                         True)
                    # This one stops us from renaming the same variable
                    # over and over. Otherwise it's not needed.
                    self.func.get_lvars()[e.v.idx].name = str(new_name)
        return 0
# Process a single function given its EA
def func(ea, vuu):
    """Collect the AST and pseudocode of the function at address *ea*, with
    every local variable renamed to a '@@VAR_<id>@@<orig>@@<orig>' sentinel.

    Returns (function_info, cfunc) where function_info carries 'function',
    'ast' and 'raw_code' keys.

    Raises:
        ValueError: if *ea* is not inside a function (the caller,
            predict_names_ah_t.activate, catches ValueError and shows it as
            an IDA warning).
        ida_hexrays.DecompilationFailure: if decompilation fails.
    """
    f = idaapi.get_func(ea)
    function_name = idaapi.get_func_name(ea)
    if f is None:
        # Bug fix: the original only printed a message here and then fell
        # through to idaapi.decompile(None). Raise instead.
        raise ValueError('Please position the cursor within a function')
    try:
        cfunc = idaapi.decompile(f)
    except ida_hexrays.DecompilationFailure as e:
        print('Failed to decompile %x: %s!' % (ea, function_name))
        raise e
    # Rename decompilation graph: first build it, then apply the sentinel
    # renamer over the same ctree.
    cg = CFuncGraph(None)
    gb = GraphBuilder(cg)
    gb.apply_to(cfunc.body, None)
    rg = RenamedGraphBuilder(cg, cfunc, vuu)
    rg.apply_to(cfunc.body, None)
    # Create tree from collected names
    cfunc.build_c_tree()
    new_graph = CFuncGraph(None)
    new_builder = GraphBuilder(new_graph)
    new_builder.apply_to(cfunc.body, None)
    function_info = dict()
    function_info["function"] = function_name
    function_info["ast"] = new_graph.json_tree(0)
    # Plain-text pseudocode with IDA's colour tags stripped.
    raw_code = ""
    for line in cfunc.get_pseudocode():
        raw_code += idaapi.tag_remove(line.line) + '\n'
    function_info["raw_code"] = raw_code
    return function_info, cfunc
class predict_names_ah_t(idaapi.action_handler_t):
    """Action handler behind the "Predict variable names" popup entry."""

    def __init__(self):
        idaapi.action_handler_t.__init__(self)

    def activate(self, ctx):
        """Run the DIRE predictor on the function under the cursor and apply
        the predicted names to the pseudocode view. Always returns 1."""
        print("Suggesting variable names...")
        ea = idaapi.get_screen_ea()
        vuu = ida_hexrays.get_widget_vdui(ctx.widget)
        if ea is None:
            idaapi.warning("Current function not found.")
        else:
            # Serialize the function info as one JSON line for the predictor.
            f = StringIO()
            with jsonlines.Writer(f) as writer:
                try:
                    info, cfunc = func(ea, vuu)
                    # We must set the working directory to the dire dir to open the model correctly
                    os.chdir(dire_dir)
                    # Run the predictor as a subprocess: JSON in on stdin,
                    # predictions out on stdout.
                    p = subprocess.Popen([RUN_ONE, '--model', MODEL], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, encoding=sys.getdefaultencoding())
                    #print(info)
                    writer.write(info)
                    comm = p.communicate(input=f.getvalue())
                    json_results = comm[0]
                    stderr = comm[1]
                    if p.returncode != 0:
                        print(stderr)
                        raise ValueError("Variable prediction failed")
                    results = json.loads(json_results)
                    # First hypothesis of the first (only) submitted function.
                    best_results = results[0][0]
                    #print("best: ", best_results)
                    # Map names through `varnames` (sentinel form) and pair
                    # each with its predicted replacement.
                    tuples = map(lambda x: (varnames[x[0]] if x[0] in varnames else x[0], x[1]['new_name']), best_results.items())
                    FinalRename(dict(tuples), cfunc, vuu).apply_to(cfunc.body, None)
                    # Force the UI to update
                    #vuu.refresh_ctext()
                except ida_hexrays.DecompilationFailure:
                    idaapi.warning("Decompilation failed")
                except ValueError as e:
                    idaapi.warning(str(e) + ". See output window for more details.")
        return 1

    def update(self, ctx):
        # Only enable the action inside pseudocode views.
        return idaapi.AST_ENABLE_FOR_WIDGET if \
            ctx.widget_type == idaapi.BWN_PSEUDOCODE else \
            idaapi.AST_DISABLE_FOR_WIDGET
class name_hooks_t(idaapi.Hexrays_Hooks):
    """Hex-Rays hook injecting the prediction action into the pseudocode
    right-click popup menu."""

    def __init__(self):
        idaapi.Hexrays_Hooks.__init__(self)

    def populating_popup(self, widget, phandle, vu):
        # Attach our action to every pseudocode popup.
        idaapi.attach_action_to_popup(vu.ct, None, actname)
        return 0
# Register the popup action and hook pseudocode popups when the Hex-Rays
# decompiler is available; otherwise just report and do nothing.
if idaapi.init_hexrays_plugin():
    idaapi.register_action(
        idaapi.action_desc_t(
            actname,
            "Predict variable names",
            predict_names_ah_t(),
            "P"))
    name_hooks = name_hooks_t()
    name_hooks.hook()
else:
    print('Predict variable names: hexrays is not available.')
class plugin(idaapi.plugin_t):
    """Minimal plugin_t wrapper so IDA keeps the script resident; all real
    work happens through the registered popup action."""
    flags = 0
    comment = "Predicts variable names in decompiled code"
    wanted_name = "Predict variable names"
    #wanted_hotkey = "P"

    def init(self):
        # Stay loaded for the whole session.
        return idaapi.PLUGIN_KEEP
def PLUGIN_ENTRY():
    # IDA's plugin loader entry point: must return a plugin_t instance.
    return plugin()
|
# coding: utf-8
# Copyright © 2014-2020 VMware, Inc. All Rights Reserved.
################################################################################
from typing import Dict
from unittest import TestCase
from cbopensource.connectors.taxii.taxii_connector_config import TaxiiConnectorConfiguration
from cbopensource.utilities.common_config import CommonConfigException
class TestConnectorConfig(TestCase):
@staticmethod
def minimal() -> Dict:
"""
Create and return a config structure with everything that does not have defaults.
NOTE: All supplied values are strings, as if read from a file.
:return:
"""
kwargs = {
"carbonblack_server_token": "DEADBEEF0000000000000000CAFEBABE",
"feed_retrieval_minutes": "22",
"listener_port": "4242",
}
return kwargs
# ----- Begin Tests ------------------------------------------------------------
def test_01a_config_minimal(self):
"""
Ensure config defaults work with the minimally supplied init values.
Config settings are:
cache_path (str)
debug (bool)
feed_retrieval_minutes (int)
host_address (str)
https_proxy (str)
listener_address (str)
listen_port (int)
log_file_size (int)
log_level (str)
multi_core (bool)
pretty_print_json (bool)
server_token (str)
server_url (str)
skip_cb_sync (bool)
use_feed_stream (str)
"""
cfg = TaxiiConnectorConfiguration.parse(self.minimal())
self.assertEqual('/usr/share/cb/integrations/cb-taxii-connector/cache', cfg['cache_folder'])
self.assertFalse(cfg['debug'])
self.assertEqual(22, cfg['feed_retrieval_minutes'])
self.assertEqual('127.0.0.1', cfg['host_address'])
assert 'https_proxy' not in cfg
self.assertEqual('0.0.0.0', cfg['listener_address'])
self.assertEqual(4242, cfg['listener_port'])
self.assertEqual(10485760, cfg['log_file_size'])
self.assertEqual('INFO', cfg['log_level'])
self.assertTrue(cfg['multi_core'])
self.assertFalse(cfg['pretty_print_json'])
self.assertEqual('DEADBEEF0000000000000000CAFEBABE', cfg['carbonblack_server_token'])
self.assertEqual('https://127.0.0.1', cfg['carbonblack_server_url'])
self.assertFalse(cfg['skip_cb_sync'])
self.assertTrue(cfg['use_feed_stream'])
def test_01b_config_empty(self):
"""
If we supply nothing, ensure we get the expected number of errors.
"""
try:
TaxiiConnectorConfiguration.parse({})
self.fail("Did not get expected exception!")
except CommonConfigException as err:
assert "Configuration key 'carbonblack_server_token' is required" in str(err)
def test_02_cache_folder(self):
"""
Ensure 'cache_folder' can be defined.
"""
base = self.minimal()
base['cache_folder'] = "/usr/bin/foobar"
cfg = TaxiiConnectorConfiguration.parse(base)
self.assertEqual("/usr/bin/foobar", cfg['cache_folder'])
def test_03_debug(self):
"""
Ensure 'debug' can be defined.
"""
base = self.minimal()
base['debug'] = "true"
cfg = TaxiiConnectorConfiguration.parse(base)
self.assertEqual(True, cfg['debug'])
# NOTE: feed_retrieval_minutes part of all tests as required value
def test_04a_feed_retrieval_minutes_below_1(self):
"""
Ensure 'feed_retrieval_minutes' minimum is tracked.
"""
base = self.minimal()
base['feed_retrieval_minutes'] = "0"
try:
TaxiiConnectorConfiguration.parse(base)
self.fail("Did not get expected exception!")
except CommonConfigException as err:
assert "feed_retrieval_minutes' must be between 1 and 43200 (got 0)" in str(err)
def test_04b_feed_retrieval_minutes_above_max(self):
"""
Ensure 'feed_retrieval_minutes' minimum is tracked.
"""
base = self.minimal()
base['feed_retrieval_minutes'] = "100000"
try:
TaxiiConnectorConfiguration.parse(base)
self.fail("Did not get expected exception!")
except CommonConfigException as err:
assert "feed_retrieval_minutes' must be between 1 and 43200 (got 100000)" in str(err)
def test_05_host_address(self):
"""
Ensure 'host_address' can be defined.
"""
base = self.minimal()
base['host_address'] = "https://foo.com"
cfg = TaxiiConnectorConfiguration.parse(base)
self.assertEqual("https://foo.com", cfg['host_address'])
def test_06_https_proxy(self):
"""
Ensure 'https_proxy' can be defined.
"""
base = self.minimal()
base['https_proxy'] = "https://foo.com"
cfg = TaxiiConnectorConfiguration.parse(base)
self.assertEqual("https://foo.com", cfg['https_proxy'])
def test_07_listener_address(self):
"""
Ensure 'listener_address' can be defined.
"""
base = self.minimal()
base['listener_address'] = "https://foo.com"
cfg = TaxiiConnectorConfiguration.parse(base)
self.assertEqual("https://foo.com", cfg['listener_address'])
# NOTE: listener_port part of all tests as required value
def test_08a_listener_port_below_minimum(self):
"""
Ensure 'listener_port' minimum is tracked.
"""
base = self.minimal()
base['listener_port'] = "-20"
try:
TaxiiConnectorConfiguration.parse(base)
self.fail("Did not get expected exception!")
except CommonConfigException as err:
assert "'listener_port' must be between 1 and 65535 (got -20)" in str(err)
def test_08b_listener_port_above_maximum(self):
"""
Ensure 'listener_port' maximum is tracked.
"""
base = self.minimal()
base['listener_port'] = "70000"
try:
TaxiiConnectorConfiguration.parse(base)
self.fail("Did not get expected exception!")
except CommonConfigException as err:
assert "'listener_port' must be between 1 and 65535 (got 70000)" in str(err)
def test_09a_log_file_size(self):
"""
Ensure 'log_file_size' can be defined.
"""
base = self.minimal()
base['log_file_size'] = "12345678"
cfg = TaxiiConnectorConfiguration.parse(base)
self.assertEqual(12345678, cfg['log_file_size'])
    def test_09b_log_file_size(self):
        """
        Ensure 'log_file_size' below the allowed minimum is tracked.
        """
        base = self.minimal()
        base['log_file_size'] = "-1"
        try:
            TaxiiConnectorConfiguration.parse(base)
            self.fail("Did not get expected exception!")
        except CommonConfigException as err:
            assert "log_file_size' must be between 1048576 and 1073741824 (got -1)" in str(err)
def test_10a_log_level(self):
"""
Ensure 'log_level' can be defined.
"""
base = self.minimal()
base['log_level'] = "warning"
cfg = TaxiiConnectorConfiguration.parse(base)
self.assertEqual("WARNING", cfg['log_level'])
    def test_10b_log_level_unmatched(self):
        """
        Ensure an invalid 'log_level' raises a CommonConfigException.
        """
        base = self.minimal()
        base['log_level'] = "warn"
        try:
            TaxiiConnectorConfiguration.parse(base)
            self.fail("Did not get expected exception!")
        except CommonConfigException as err:
            assert ("Configuration key 'log_level' must be in allowed values "
                    "['DEBUG', 'INFO', 'WARNING', 'ERROR']") in str(err)
def test_11_multi_core(self):
"""
Ensure 'multi_core' can be defined.
"""
base = self.minimal()
base['multi_core'] = "False"
cfg = TaxiiConnectorConfiguration.parse(base)
self.assertEqual(False, cfg['multi_core'])
    def test_12_pretty_print_json(self):
        """
        Ensure 'pretty_print_json' can be defined.
        """
        base = self.minimal()
        base['pretty_print_json'] = "true"
        cfg = TaxiiConnectorConfiguration.parse(base)
        self.assertEqual(True, cfg['pretty_print_json'])
# NOTE: carbonblack_server_token part of all tests as required value
def test_13_carbonblack_server_url(self):
"""
Ensure 'carbonblack_server_url' can be defined.
"""
base = self.minimal()
base['carbonblack_server_url'] = "https://foo.com"
cfg = TaxiiConnectorConfiguration.parse(base)
self.assertEqual("https://foo.com", cfg['carbonblack_server_url'])
def test_14a_skip_cb_sync(self):
"""
Ensure 'skip_cb_sync' can be defined.
"""
base = self.minimal()
base['skip_cb_sync'] = "True"
cfg = TaxiiConnectorConfiguration.parse(base)
self.assertEqual(True, cfg['skip_cb_sync'])
def test_15a_feed_save_mode(self):
"""
Ensure 'feed_save_mode' can be defined.
"""
base = self.minimal()
base['feed_save_mode'] = "Stream"
cfg = TaxiiConnectorConfiguration.parse(base)
self.assertEqual("STREAM", cfg['feed_save_mode'])
self.assertEqual(True, cfg['use_feed_stream'])
    def test_15b_save_mode_unmatched(self):
        """
        Ensure an invalid 'feed_save_mode' raises a CommonConfigException.
        """
        base = self.minimal()
        base['feed_save_mode'] = "Saved"
        try:
            TaxiiConnectorConfiguration.parse(base)
            self.fail("Did not get expected exception!")
        except CommonConfigException as err:
            assert "Configuration key 'feed_save_mode' must be in allowed values ['STREAM', 'BULK']" in str(err)
|
# Client Payment Python SDK
# API docs at https://github.com/Space-Around/client-payment-sdk-python
# Authors:
# Viksna Max <viksnamax@mail.ru>
# ClientPaymentSDK
from .client import ClientPaymentSDK
from .sign import sign
from .exceptions import ClientPaymentSDKError, RequestError, InternalServerError, SignatureVerificationError,\
PassedTypeError, MatchKeyError, ParseResponseError
from .utils import dict_to_str
from .models import InitPaymentResponse, StatusPaymentResponse, BalanceResponse, WithdrawalResponse, \
StatusWithdrawalResponse
from .webhook import Webhook, WebhookData
|
#!/usr/bin/env python3
import importlib.util, os.path, sys
# Public command names; these must match the functions defined below, both for
# star-imports and for _main()'s option listing.
# BUG FIX: the tuple previously listed "increment_revision_if_not_frozen",
# which does not exist; the defined function is increment_revision.
__all__ = ("update", "reset_to_zero", "increment_revision")
# Absolute path of the generated version.py, next to this script.
VERSION_PY_FILE = os.path.join(
    os.path.dirname(os.path.abspath(__file__)),
    "version.py"
)
def _module_from_file(module_name, file_path):
spec = importlib.util.spec_from_file_location(module_name, file_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
def version_module():
    """Load the generated version.py and return it as a module object."""
    return _module_from_file("version", VERSION_PY_FILE)
def update(major, minor, build, revision):
    """Overwrite version.py with the four given version components."""
    lines = [
        "#!/usr/bin/env python3",
        "# DO NOT EDIT!",
        "# This file is automatically generated.",
        "# MAJOR and MINOR should be updated in make.py.",
        "# BUILD and REVISION are incremented automatically.",
        "MAJOR = " + repr(major),
        "MINOR = " + repr(minor),
        "BUILD = " + repr(build),
        "REVISION = " + repr(revision),
    ]
    with open(VERSION_PY_FILE, "w") as fout:
        fout.write("\n".join(lines) + "\n")
def reset_to_zero():
    """Reset all four version components to zero."""
    update(0, 0, 0, 0)
def increment_revision():
    """Bump REVISION by one, keeping MAJOR/MINOR/BUILD unchanged.

    Intended to run only when executing from source.
    """
    current = version_module()
    update(current.MAJOR, current.MINOR, current.BUILD, current.REVISION + 1)
def _main():
    """Dispatch on argv: run the named command, then report the version.

    With no (or an unknown) command, list the available options instead.
    """
    if len(sys.argv) > 1:
        command = sys.argv[1]
        # "display_current" is a no-op command: just print the version below.
        func = (lambda: None) if command == "display_current" else globals().get(command)
        if func:
            func(*sys.argv[2:])
            current = version_module()
            print(
                "The version is now: {}.{}.{}.{}".format(
                    current.MAJOR,
                    current.MINOR,
                    current.BUILD,
                    current.REVISION
                )
            )
            return
    print("Available options:")
    for varname in __all__:
        print(" -", varname)
    print(" - display_current")
if __name__ == "__main__":
    _main()
|
import logging
import time
from BaseHTTPServer import BaseHTTPRequestHandler
from threading import Thread, Lock, Condition
from lib.proxy import proxy_sockets
from lib.stats import EC2StatsModel
from lib.utils import ThreadedHTTPServer
logger = logging.getLogger(__name__)
class Message(object):
    """Immutable wrapper pairing a message payload with its arrival time."""

    def __init__(self, content):
        self.__content = content
        # Timestamp of receipt, used by the reaper to expire stale messages.
        self.__receiveTime = time.time()

    @property
    def content(self):
        """The raw message payload."""
        return self.__content

    @property
    def receiveTime(self):
        """Epoch seconds at which this message was received."""
        return self.__receiveTime
class Socket(object):
    """Wrapper pairing a socket with its open time and idle timeout."""

    def __init__(self, sock, idleTimeout):
        self.__sock = sock
        self.__idleTimeout = idleTimeout
        # Timestamp of registration, used to decide when to reap the socket.
        self.__openTime = time.time()

    @property
    def sock(self):
        """The wrapped socket object."""
        return self.__sock

    @property
    def openTime(self):
        """Epoch seconds at which this wrapper was created."""
        return self.__openTime

    @property
    def idleTimeout(self):
        """Seconds of inactivity after which the socket may be closed."""
        return self.__idleTimeout

    def close(self):
        """Close the underlying socket."""
        self.__sock.close()
class ReverseConnectionServer(object):
    """Rendezvous point for reverse connections.

    Remote peers deposit messages and sockets keyed by id (via the HTTP
    handler in start_reverse_connection_server); local consumers claim them
    with get_message()/get_socket(). A daemon thread reaps entries that are
    not claimed within their timeout windows.
    """

    def __init__(self, publicHostAndPort, messageTimeout=5, connTimeout=5):
        # Pending messages/sockets, keyed by caller-supplied id.
        self.__messages = {}
        self.__messagesLock = Lock()
        self.__sockets = {}
        self.__socketsLock = Lock()
        # The condition shares the sockets lock so get_socket() can wait for
        # take_ownership_of_socket() to deposit a matching socket.
        self.__socketsCond = Condition(self.__socketsLock)
        self.__connTimeout = connTimeout
        self.__publicHostAndPort = publicHostAndPort
        self.__httpServer = None
        # Background reaper; daemonized so it dies with the process.
        t = Thread(target=self.__timeout_sockets_and_messages,
                   args=(messageTimeout,))
        t.daemon = True
        t.start()

    def __timeout_sockets_and_messages(self, messageTimeout, frequency=1):
        # Periodically close idle sockets and drop stale messages.
        # NOTE(review): deleting while iterating .keys() is safe here only
        # because this is Python 2 code (BaseHTTPServer import), where
        # keys() returns a list copy; on Python 3 this would raise
        # RuntimeError and would need list(self.__sockets.keys()).
        while True:
            time.sleep(frequency)
            curTime = time.time()
            with self.__socketsLock:
                for socketId in self.__sockets.keys():
                    sockObj = self.__sockets[socketId]
                    if (curTime - sockObj.openTime > sockObj.idleTimeout):
                        sockObj.close()
                        del self.__sockets[socketId]
            with self.__messagesLock:
                for messageId in self.__messages.keys():
                    if (curTime - self.__messages[messageId].receiveTime
                            > messageTimeout):
                        del self.__messages[messageId]

    @property
    def publicHostAndPort(self):
        """The externally visible host:port this server is reachable on."""
        return self.__publicHostAndPort

    def take_ownership_of_socket(self, socketId, sock, idleTimeout):
        """Store ``sock`` under ``socketId`` and wake any get_socket() waiters."""
        with self.__socketsLock:
            self.__sockets[socketId] = Socket(sock, idleTimeout)
            self.__socketsCond.notify_all()

    def get_socket(self, socketId):
        """Claim and remove the socket for ``socketId``.

        Waits up to connTimeout seconds for it to appear; returns None on
        timeout.
        """
        endTime = time.time() + self.__connTimeout
        with self.__socketsLock:
            while True:
                curTime = time.time()
                ret = self.__sockets.get(socketId)
                if ret is not None:
                    del self.__sockets[socketId]
                    break
                else:
                    self.__socketsCond.wait(endTime - curTime)
                    # NOTE(review): this compares the pre-wait timestamp, so
                    # a timed-out wait may loop one extra time before the
                    # fresh curTime at the top triggers the break.
                    if curTime > endTime: break
        return ret

    def get_message(self, messageId):
        """Claim and remove the message for ``messageId``; None if absent."""
        with self.__messagesLock:
            ret = self.__messages.get(messageId)
            if ret is not None:
                del self.__messages[messageId]
            return ret

    def put_message(self, messageId, message):
        """Store ``message`` under ``messageId`` for later retrieval."""
        with self.__messagesLock:
            self.__messages[messageId] = message

    def register_http_server(self, httpServer):
        """Attach the HTTP server instance so shutdown() can stop it."""
        self.__httpServer = httpServer

    def shutdown(self):
        """Close and stop the registered HTTP server."""
        self.__httpServer.server_close()
        self.__httpServer.shutdown()
def start_reverse_connection_server(localPort, publicHostAndPort, stats):
    """Start a ReverseConnectionServer with an HTTP front end on localPort.

    GET answers a liveness probe, POST deposits a message keyed by the URL
    path, and CONNECT claims a previously deposited socket and proxies
    traffic over it. Byte counts are recorded into the 'proxy' and 'ec2'
    stats models. Returns the ReverseConnectionServer instance; the HTTP
    server runs in a daemon thread.
    """
    proxyModel = stats.get_model('proxy')
    # Lazily register the EC2 model the first time this server starts.
    if 'ec2' not in stats.models:
        stats.register_model('ec2', EC2StatsModel())
    ec2Model = stats.get_model('ec2')
    server = ReverseConnectionServer(publicHostAndPort)
    testLivenessResponse = 'Server is live!\n'
    class RequestHandler(BaseHTTPRequestHandler):
        def log_message(self, format, *args):
            """Override the default logging to not print to stdout"""
            logger.info('%s - [%s] %s' %
                        (self.client_address[0],
                         self.log_date_time_string(),
                         format % args))
        def log_error(self, format, *args):
            """Override the default logging to not print to stdout"""
            logger.error('%s - [%s] %s' %
                         (self.client_address[0],
                          self.log_date_time_string(),
                          format % args))
        def do_GET(self):
            # Liveness probe: always answer 200 with a fixed body.
            self.send_response(200)
            self.send_header('Content-Length', str(len(testLivenessResponse)))
            self.end_headers()
            self.wfile.write(testLivenessResponse)
        def do_POST(self):
            # The URL path (minus leading '/') is the message id.
            messageId = self.path[1:]
            messageLength = int(self.headers['Content-Length'])
            messageBody = self.rfile.read(messageLength)
            logger.info('Received: %s (%dB)', messageId, len(messageBody))
            server.put_message(messageId, Message(messageBody))
            proxyModel.record_bytes_down(len(messageBody))
            self.send_response(204)
            self.send_header('Content-Length', '0')
            self.end_headers()
        def do_CONNECT(self):
            # The URL path (minus leading '/') is the socket id to claim.
            socketId = self.path[1:]
            logger.info('Connect: %s', socketId)
            socketRequest = server.get_socket(socketId)
            try:
                if socketRequest is not None:
                    self.send_response(200)
                    self.end_headers()
                else:
                    self.send_error(404, 'Resource not found')
                    self.end_headers()
                    return
                # Pump bytes between the claimed socket and this connection
                # until one side closes or idles out.
                err, bytesDown, bytesUp = \
                    proxy_sockets(socketRequest.sock, self.connection,
                                  socketRequest.idleTimeout)
                if err is not None:
                    logger.exception(err)
                proxyModel.record_bytes_down(bytesDown)
                proxyModel.record_bytes_up(bytesUp)
                ec2Model.record_bytes_down(bytesDown)
                ec2Model.record_bytes_up(bytesUp)
            except Exception as e:
                logger.exception(e)
            finally:
                # Always release the claimed socket, even on proxy errors.
                if socketRequest is not None:
                    socketRequest.close()
    httpServer = ThreadedHTTPServer(('', localPort), RequestHandler)
    server.register_http_server(httpServer)
    t = Thread(target=lambda: httpServer.serve_forever())
    t.daemon = True
    t.start()
    return server
|
"""Print 'Fizz' when the typed number is divisible by 3; otherwise echo it."""
numero = int(input('Digite um número: '))
print('Fizz' if numero % 3 == 0 else numero)
|
from __future__ import division
from Util import *
class Solfeggio:
    """Helpers for generating and looking up solfeggio frequencies."""

    @staticmethod
    def solfeggios ():
        """Yield every number built from permutations of the digit triples."""
        digit_triples = ([1, 4, 7], [5, 2, 8], [3, 6, 9])
        for triple in digit_triples:
            for perm in permutations (triple):
                yield list_to_number (perm)

    @staticmethod
    def chrang (chakra, rang):
        """Return the frequency for the given chakra within the given range."""
        return solfeggio[rang, chakra]
ranges = ["low", "med", "hi"]
chakras = ["red", "orange", "yellow", "green", "blue", "purple"]
# Frequency rows per range, in chakra order (red..purple).
_frequency_rows = {
    "low": (174, 147, 285, 369, 396, 258),
    "med": (417, 471, 528, 693, 639, 582),
    "hi": (741, 714, 852, 936, 963, 825),
}
# Lookup table keyed by (range, chakra) pairs.
solfeggio = {}
for _rang in ranges:
    for _chakra, _freq in zip(chakras, _frequency_rows[_rang]):
        solfeggio[_rang, _chakra] = _freq
from __future__ import division
from __future__ import print_function
import torch.nn as nn
def conv2d(channels_in, channels_out, kernel_size=3, stride=1, bias=True):
    """Conv2d + LeakyReLU(0.1) block with 'same'-style padding for odd kernels."""
    convolution = nn.Conv2d(channels_in, channels_out,
                            kernel_size=kernel_size, stride=stride,
                            padding=(kernel_size - 1) // 2, bias=bias)
    activation = nn.LeakyReLU(0.1, inplace=True)
    return nn.Sequential(convolution, activation)
def deconv2d(channels_in, channels_out, kernel_size=4, stride=2, padding=1, bias=True):
    """ConvTranspose2d + LeakyReLU(0.1) block; defaults double spatial size."""
    upconvolution = nn.ConvTranspose2d(channels_in, channels_out,
                                       kernel_size=kernel_size, stride=stride,
                                       padding=padding, bias=bias)
    activation = nn.LeakyReLU(0.1, inplace=True)
    return nn.Sequential(upconvolution, activation)
"""Utility functions finding which storage type is used."""
from birgitta import context
__all__ = ['stored_in']
def stored_in(t):
    """Return True when the configured dataset storage type equals ``t``."""
    return context.get("BIRGITTA_DATASET_STORAGE") == t
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
@author: nico
@file: serializers.py
@time: 2018/08/24
"""
from rest_framework import serializers
from generic_relations.relations import GenericRelatedField
from blog.api.serializers import PostListSerializer
from blog.models import Post
from oper.models import UserFavorite
class UserFavoriteSerializer(serializers.ModelSerializer):
    """Serializer for UserFavorite entries.

    The generic foreign key is rendered through a GenericRelatedField that
    maps each favorited model to its own serializer; currently only Post
    (via PostListSerializer) is supported.
    """
    content_object = GenericRelatedField({
        Post: PostListSerializer()
    })

    class Meta:
        model = UserFavorite
        fields = ('content_object', 'add_time')
|
import os
from xml.dom import minidom
from xml.dom.minidom import parseString
from xml.etree import ElementTree
import lxml.etree as ET
from eatb.utils.fileutils import read_file_content
from eatb.xml.xmlschemanotfound import XMLSchemaNotFound
def pretty_xml_string(xml_string):
    """Return a pretty-printed (indented) version of the given XML string."""
    return parseString(xml_string).toprettyxml()
def prettify(elem):
    """Return a pretty-printed XML string for the Element.
    """
    serialized = ElementTree.tostring(elem, 'utf-8')
    return minidom.parseString(serialized).toprettyxml(indent="  ")
def rewrite_pretty_xml(xml_file_path):
    """Re-serialize the XML file at ``xml_file_path`` in pretty-printed form, in place.

    Entities are not resolved and CDATA sections are preserved.
    """
    parser = ET.XMLParser(resolve_entities=False, remove_blank_text=True, strip_cdata=False)
    parsed_file = ET.parse(xml_file_path, parser)
    xml_file_root = parsed_file.getroot()
    xml_content = ET.tostring(xml_file_root, encoding='UTF-8', pretty_print=True, xml_declaration=True)
    # BUG FIX: lxml's tostring() returns bytes when an encoding is given, so
    # the file must be opened in binary mode (text mode raises TypeError on
    # Python 3). The redundant close() inside the with-block was dropped.
    with open(xml_file_path, 'wb') as output_file:
        output_file.write(xml_content)
def get_xml_schemalocations(xml_file):
    """Return absolute paths of all schemas referenced via xsi:schemaLocation.

    Raises XMLSchemaNotFound when a referenced schema file does not exist
    relative to the XML file's directory.
    """
    XSI = "http://www.w3.org/2001/XMLSchema-instance"
    xml_dir, tail = os.path.split(xml_file)
    tree = ET.XML(read_file_content(xml_file))
    schema_locs = []
    # Collect every xsi:schemaLocation attribute in the document.
    location_attrs = set(tree.xpath("//*/@xsi:schemaLocation", namespaces={'xsi': XSI}))
    for location_attr in location_attrs:
        tokens = location_attr.strip().split()
        # schemaLocation is a whitespace-separated list of
        # (namespace, location) pairs; keep only the locations.
        pairs = zip(*[iter(tokens)] * 2)
        for namespace, location in pairs:
            loc = os.path.abspath(os.path.join(xml_dir, location))
            if not os.path.exists(loc):
                print("ERROR: XML-Schema file not found: %s" % loc)
                raise XMLSchemaNotFound("XML-Schema file not found: %s" % loc)
            schema_locs.append(loc)
    return schema_locs
def change_dcat_element_values(md_file, substitutions):
    """Set element text values in a DCAT metadata file, in place.

    ``substitutions`` maps XPath expressions (dct/dcat namespaces available)
    to the new text value for every matching element.
    """
    xml_content = read_file_content(md_file)
    tree = ET.XML(xml_content)
    # BUG FIX: dict.iteritems() is Python-2-only and inconsistent with the
    # sibling get_dcat_element_values(); use items(), which works on both.
    for xpath_expr, value in substitutions.items():
        matches = set(tree.xpath(xpath_expr, namespaces={'dct': 'http://purl.org/dc/terms/', 'dcat': 'http://www.w3.org/ns/dcat#'}))
        for match in matches:
            print(match)
            match.text = value
    xml_content = ET.tostring(tree, encoding='UTF-8', pretty_print=True, xml_declaration=True)
    # BUG FIX: tostring() with an encoding returns bytes, so write in binary
    # mode; also dropped the dead `sl` local and redundant close().
    with open(md_file, 'wb') as output_file:
        output_file.write(xml_content)
def get_dcat_element_values(md_file, substitutions):
    """Apply ``substitutions`` to a DCAT metadata file and return the result.

    Like change_dcat_element_values(), but returns the serialized document
    (as bytes) instead of rewriting the file.
    """
    xml_content = read_file_content(md_file)
    tree = ET.XML(xml_content.encode('utf-8'))
    for xpath_expr, value in substitutions.items():
        matches = set(tree.xpath(xpath_expr, namespaces={'dct': 'http://purl.org/dc/terms/', 'dcat': 'http://www.w3.org/ns/dcat#'}))
        for match in matches:
            print(match)
            match.text = value
    # Dropped the dead `sl = schema_location` local from the original.
    xml_content = ET.tostring(tree, encoding='UTF-8', pretty_print=True, xml_declaration=True)
    return xml_content
|
import helpers
import contextio as c

CONSUMER_KEY = "YOUR_CONTEXTIO_CONSUMER_KEY"
# BUG FIX: the secret placeholder previously read "..._CONSUMER_KEY"
# (copy-paste error), which misleads users filling in their credentials.
CONSUMER_SECRET = "YOUR_CONTEXTIO_CONSUMER_SECRET"

api = c.ContextIO(
    consumer_key=CONSUMER_KEY,
    consumer_secret=CONSUMER_SECRET,
    debug=True
)  # returns v2.0 API by default

helpers.headerprint("FETCHING ACCOUNTS (v2.0 API)")
accounts = api.get_accounts()
# print() calls work on both Python 2 and 3 for these single-argument uses.
print(accounts)
print("\n")
print("Found {0} accounts.".format(len(accounts)))
print("\n")
|
#!/usr/bin/python
# this source is part of my Hackster.io project: https://www.hackster.io/mariocannistra/radio-astronomy-with-rtl-sdr-raspberrypi-and-amazon-aws-iot-45b617
# this program will determine the overall range of signal strengths received during the whole session.
# this program can be run standalone but is usually run at end of session by doscanw.py
# Its output will be stored in 2 files:
# dbminmax.txt and session-overview.png . The first contains two rows of text with just the maximum
# and minimum of the whole session. The second contains a chart of all the min and max values for each of
# the scan files
from glob import glob
import numpy as np
import radioConfig
import subprocess
import os
import datetime
import sys
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def outmsg(smsg):
    """Print a status message prefixed with this program's name."""
    print("..findsessionrangew.py: " + smsg)
def strinsert(source_str, insert_str, pos):
    """Return ``source_str`` with ``insert_str`` inserted at index ``pos``."""
    head, tail = source_str[:pos], source_str[pos:]
    return head + insert_str + tail
outmsg("Finding session range...")
# Session-wide extrema; initialized beyond any plausible dB value.
globmax = -9000
globmin = 9000
# Per-scan minima/maxima and their timestamp labels, accumulated below.
sessmin = np.empty(shape=[0, 1])
sessmax = np.empty(shape=[0, 1])
scantimeline = np.empty(shape=[0, 1])
# The session folder is the single command-line argument.
sessionfolder = sys.argv[1]
overviewname = sessionfolder + os.sep + 'session-overview.png'
minmaxname = sessionfolder + os.sep + 'dbminmax.txt'
binpattern = sessionfolder + os.sep + '*.bin'
files_in_dir = sorted(glob(binpattern))
for fname in files_in_dir:
    outmsg(fname)
    # Each .bin file holds raw float32 dB samples for one scan.
    dbs = np.fromfile(fname, dtype='float32')
    thismin=dbs.min()
    thismax=dbs.max()
    # Date/time are sliced out of the file path at fixed offsets.
    # NOTE(review): assumes the session-folder prefix puts the date at
    # [12:20] and the time at [20:26] — verify against doscanw.py's naming.
    scantime=str(fname)[20:26]
    scandate=str(fname)[12:20]
    if thismin < globmin:
        globmin = thismin
    if thismax > globmax:
        globmax = thismax
    sessmin = np.append(sessmin, thismin)
    sessmax = np.append(sessmax, thismax)
    # Reformat HHMMSS into "DD HH:MM:SS" for the x-axis labels.
    scantime = strinsert(scantime, ":", 2)
    scantime = strinsert(scantime, ":", 5)
    scantime = scandate[-2:] + " " + scantime
    msg = "%s %s %f %f" % (scandate,scantime,thismin,thismax)
    outmsg(msg)
    scantimeline = np.append(scantimeline, scantime)
mytitle = 'This session signal range: min %.2f .. max %.2f' % (globmin,globmax)
outmsg(mytitle)
# this red plot will help us in finding the scan with highest power range
# (when using the gainloop.py program it will be useful to find the best gain values)
# adding globmin value just to offset the red plot to the middle of the chart
sessdiff = ( sessmax - sessmin ) + globmin
xs = range(len(scantimeline))
# Plot per-scan minima and maxima with annotated values.
plt.figure(figsize=(12, 9), dpi=600)
plt.xlabel('Scan time (UTC)', fontsize=8)
plt.ylabel('Signal power', fontsize=8)
plt.tick_params(labelsize=8)
plt.plot(xs, sessmax, linestyle='--', marker='o' )
plt.plot(xs, sessmin, linestyle='--', marker='o' )
#plt.plot(xs,sessdiff )
plt.xticks(xs,scantimeline,rotation=70,fontsize=8)
for i,j in zip(xs,sessmin):
    tann = '%.1f' % j
    plt.annotate( tann, xy=(i,j), xytext=(0,15), textcoords='offset points', fontsize=8 )
for i,j in zip(xs,sessmax):
    tann = '%.1f' % j
    plt.annotate( tann, xy=(i,j), xytext=(0,-20), textcoords='offset points', fontsize=8 )
plt.grid()
#leg = plt.legend( ('maxima','minima','difference'), loc='upper right' )
leg = plt.legend( ('maxima','minima'), loc='upper right' )
leg.get_frame().set_alpha(0.5)
plt.title(mytitle)
#plt.show()
plt.tight_layout()
plt.savefig(overviewname)
# Persist the session-wide extrema: max on the first line, min on the second.
sessfile = open(minmaxname, "w")
sessfile.write(str(globmax))
sessfile.write("\n")
sessfile.write(str(globmin))
sessfile.write("\n")
sessfile.close()
outmsg("Session signal range chart saved")
|
#
# Not used at the moment, but will be needed later
#
import os
def get_nova_credentials():
    """Build novaclient credentials from the OS_* environment variables."""
    env = os.environ
    return {
        'version': '2',
        'username': env['OS_USERNAME'],
        'api_key': env['OS_PASSWORD'],
        'auth_url': env['OS_AUTH_URL'],
        'project_id': env['OS_TENANT_NAME'],
    }
def get_keystone_credentials():
    """Build keystoneclient credentials from the OS_* environment variables."""
    env = os.environ
    return {
        'username': env['OS_USERNAME'],
        'password': env['OS_PASSWORD'],
        'auth_url': env['OS_AUTH_URL'],
        'tenant_name': env['OS_TENANT_NAME'],
    }
|
import io
import os
import re
from dataclasses import dataclass, field
from typing import BinaryIO, Dict, Tuple, cast
from .cov_info import BBEntry, CodeBlock, CoverageInfo
from .exceptions import InvalidBBTableHeader, InvalidHeader, InvalidModuleTableHeader
from .module_table_entries import get_proper_module_table_entry_cls
@dataclass
class DrCov:
    """In-memory representation of a drcov coverage file.

    Supports importing drcov version 2-3 files (header, module table,
    basic-block table) and exporting them back out, optionally restricted
    to a single module.
    """

    # Per-module coverage, keyed by module id.
    cov_infos: Dict[int, CoverageInfo] = field(default_factory=dict)

    def __str__(self) -> str:
        return "\n\n".join(
            f"module_id: {i}\n" + str(cov_info)
            for i, cov_info in self.cov_infos.items()
        )

    def import_from_file(self, file_path: str) -> None:
        """Parse the drcov file at ``file_path`` into this instance."""
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"{file_path} does not exist")
        with open(file_path, "rb") as fin:
            self.import_from_binaryio(fin)

    def import_from_bytes(self, data: bytes) -> None:
        """Parse raw drcov file contents."""
        self.import_from_binaryio(io.BytesIO(data))

    @staticmethod
    def _is_supported_drcov_version(version: int) -> bool:
        return 2 <= version <= 3

    def import_from_binaryio(self, bio: BinaryIO) -> None:
        """Parse a drcov file from a binary stream.

        Raises InvalidHeader / InvalidModuleTableHeader /
        InvalidBBTableHeader on malformed or unsupported input.
        """
        version, _ = self._read_header(bio)
        if not self._is_supported_drcov_version(version):
            raise InvalidHeader(f"DRCOV VERSION: {version} is not supported yet")
        self._read_module_table(bio)
        self._read_bb_table(cast(io.BytesIO, bio))

    @staticmethod
    def _read_header(bio: BinaryIO) -> Tuple[int, bytes]:
        """Read the two header lines; return (version, flavor)."""
        version_header = bio.readline()
        if not version_header.startswith(b"DRCOV VERSION:"):
            raise InvalidHeader(version_header.decode("utf-8"))
        flavor_header = bio.readline()
        if not flavor_header.startswith(b"DRCOV FLAVOR: drcov"):
            raise InvalidHeader(flavor_header.decode("utf-8"))
        version = version_header.split(b":")[-1].strip()
        flavor = flavor_header.split(b":")[-1].strip()
        return int(version), flavor

    @staticmethod
    def _is_supported_module_table_ver(mod_table_ver: int) -> bool:
        return 2 <= mod_table_ver <= 5

    @staticmethod
    def _parse_module_table_header(module_table_header_line: bytes) -> Tuple[int, int]:
        """Parse 'Module Table: version {ver}, count {n}' into (ver, n)."""
        mobj = re.fullmatch(
            r"Module Table: version (\d+), count (\d+)", module_table_header_line.decode("utf-8").strip()
        )
        if mobj is None:
            raise InvalidModuleTableHeader(module_table_header_line.decode("utf-8"))
        ver = int(mobj.group(1))
        n_modules = int(mobj.group(2))
        return ver, n_modules

    def _read_module_table(self, bio: BinaryIO) -> None:
        """Read the module table and populate self.cov_infos (one per module)."""
        module_table_header_line = bio.readline()
        ver, n_modules = self._parse_module_table_header(module_table_header_line)
        if not self._is_supported_module_table_ver(ver):
            raise InvalidModuleTableHeader(f"Module Table version {ver} is not supported")
        # The column header determines which entry layout applies.
        column_header = bio.readline()
        module_table_entry_cls = get_proper_module_table_entry_cls(column_header)
        for _ in range(n_modules):
            module_table_entry = module_table_entry_cls().from_line(bio.readline())
            self.cov_infos[
                module_table_entry.id  # type: ignore
            ] = module_table_entry.to_coverage_info()

    def _read_bb_table(self, bio: io.BytesIO) -> None:
        """Read the binary basic-block table into the per-module block lists."""
        # BB Table: {count} bbs
        bb_table_header = bio.readline()
        mobj = re.fullmatch(
            r"BB Table: (\d+) bbs", bb_table_header.decode("utf-8").strip()
        )
        if mobj is None:
            raise InvalidBBTableHeader(bb_table_header.decode("utf-8"))
        n_bb_entries = int(mobj.group(1))
        for _ in range(n_bb_entries):
            bb_entry = BBEntry()
            bio.readinto(bb_entry)  # type: ignore
            self.cov_infos[bb_entry.mod_id].passed_blocks.append(
                CodeBlock(bb_entry.start, bb_entry.size)
            )

    def export_specific_module_to_file(self, module_name: str, file_path: str) -> None:
        """Write a drcov file containing only the named module's coverage."""
        with open(file_path, "wb") as fout:
            self.export_specific_module_to_binaryio(fout, module_name)

    def export_specific_module_to_binaryio(
        self, bio: BinaryIO, module_name: str
    ) -> None:
        """Export coverage of the first module whose name contains ``module_name``.

        Prints a notice and exports nothing when no module matches.
        """
        # BUG FIX: the original called next() on a bare generator, which
        # raises StopIteration when nothing matches and made the `is None`
        # check below unreachable; supply None as the default instead.
        target_mod_id = next(
            (i for i, cov_info in self.cov_infos.items() if module_name in cov_info.name),
            None,
        )
        if target_mod_id is None:
            print(f"{module_name} is not found")
            return
        DrCov.export_to_binaryio(bio, {target_mod_id: self.cov_infos[target_mod_id]})

    def export_to_file(self, file_path: str) -> None:
        """Write the full coverage data to a drcov file."""
        with open(file_path, "wb") as fout:
            self.export_to_binaryio(fout, self.cov_infos)

    @staticmethod
    def export_to_binaryio(bio: BinaryIO, cov_infos: Dict[int, CoverageInfo]) -> None:
        """Serialize ``cov_infos`` as a drcov version-2 stream."""
        DrCov._write_header(bio)
        DrCov._write_module_table(bio, cov_infos)
        DrCov._write_bb_table(bio, cov_infos)

    @staticmethod
    def _write_header(bio: BinaryIO) -> None:
        bio.write(b"DRCOV VERSION: 2\n")
        bio.write(b"DRCOV FLAVOR: drcov\n")

    @staticmethod
    def _write_module_table(bio: BinaryIO, cov_infos: Dict[int, CoverageInfo]) -> None:
        DrCov._write_module_table_header(bio, len(cov_infos))
        for idx, cov_info in cov_infos.items():
            DrCov._write_module_table_entry(bio, idx, cov_info)

    @staticmethod
    def _write_module_table_header(bio: BinaryIO, count: int) -> None:
        bio.write(f"Module Table: version 2, count {count}\n".encode("utf-8"))
        bio.write(b"Columns: id, base, end, entry, checksum, timestamp, path\n")

    @staticmethod
    def _write_module_table_entry(
        bio: BinaryIO, idx: int, cov_info: CoverageInfo
    ) -> None:
        # entry/checksum/timestamp are written as zeros; only the address
        # range and path are reconstructed from CoverageInfo.
        end_addr = cov_info.base_addr + cov_info.module_size
        bio.write(
            f"{idx}, {cov_info.base_addr:#x}, {end_addr:#x}, {0:#016x}, {0:#08x}, {0:#08x}, {cov_info.name}\n".encode(
                "utf-8"
            )
        )

    @staticmethod
    def _write_bb_table(bio: BinaryIO, cov_infos: Dict[int, CoverageInfo]) -> None:
        DrCov._write_bb_table_header(
            bio, sum(len(cov_info.passed_blocks) for _, cov_info in cov_infos.items())
        )
        for idx, cov_info in cov_infos.items():
            for passed_block in cov_info.passed_blocks:
                DrCov._write_bb_table_entry(
                    cast(io.BytesIO, bio),
                    passed_block.passed_rva,
                    passed_block.passed_size,
                    idx,
                )

    @staticmethod
    def _write_bb_table_header(bio: BinaryIO, count: int) -> None:
        bio.write(f"BB Table: {count} bbs\n".encode("utf-8"))

    @staticmethod
    def _write_bb_table_entry(
        bio: io.BytesIO, start_rva: int, bb_size: int, mod_id: int
    ) -> None:
        # BBEntry is written in its raw in-memory (binary) layout.
        buffer_ = io.BytesIO()
        buffer_.write(BBEntry(start_rva, bb_size, mod_id))  # type: ignore
        bio.write(buffer_.getvalue())
|
import datetime
def votar(anonascimento=0, ano_atual=None):
    """Return a voting-eligibility message for the given birth year.

    ``ano_atual`` defaults to the current year; passing it explicitly makes
    the function deterministic (and removes the original's hidden dependency
    on a module-level ``ano`` global defined after this function).
    Returns 'ERRO' when the birth year is in the future.
    """
    if ano_atual is None:
        ano_atual = datetime.datetime.now().year
    anonascimento = int(anonascimento)
    if anonascimento > ano_atual:
        return 'ERRO'
    idade = ano_atual - anonascimento
    if idade >= 18:
        return f'Com {idade} anos podes votar!'
    else:
        return f'Com {idade} anos não podes votar!'
# Current year, used by votar() to compute the age.
ano = datetime.datetime.now().year
print(votar(int(input('Ano de nascimento: '))))
|
import pandas as pd
from mlp import MLP_reg
from neumannS0_mlp import Neumann_mlp
from learning_curves import run
# Experiment settings: MCAR data, studying the effect of network depth/width.
n_iter = 20
n_jobs = 40
n_sizes = [int(1e5)]
n_test = int(1e4)
n_val = int(1e4)
data_type = 'MCAR'
filename = 'MCAR_depth_effect'
compute_br = True

# First fill in data_desc with all default values.
default_values = {'n_features': 20, 'missing_rate': 0.5, 'prop_latent': 0.5,
                  'snr': 10, 'masking': 'MCAR'}
data_descs = pd.DataFrame([default_values])

# Width sweep for a depth-1 torch MLP.
methods = [
    {'name': 'torchMLP', 'est': MLP_reg, 'type_width': 'linear',
     'width': q, 'depth': 1, 'n_epochs': 2000, 'batch_size': 200,
     'early_stopping': True, 'verbose': False}
    for q in [1, 2, 3, 4, 5, 10, 15, 20, 30, 40, 50, 100]
]
# Depth sweep for a width-1 torch MLP.
methods += [
    {'name': 'torchMLP', 'est': MLP_reg, 'type_width': 'linear',
     'width': 1, 'depth': d, 'n_epochs': 2000, 'batch_size': 200,
     'early_stopping': True, 'verbose': False}
    for d in range(1, 11)
]
# Depth sweep for Neumann networks, crossed with residual connections and
# early stopping (loop order matches the original nesting).
methods += [
    {'name': 'Neumann', 'est': Neumann_mlp, 'depth': d,
     'n_epochs': 100, 'batch_size': 10,
     'early_stopping': early_stopping,
     'residual_connection': residual_connection,
     'verbose': False}
    for d in range(10)
    for residual_connection in (False, True)
    for early_stopping in (False, True)
]

run_params = {
    'n_iter': n_iter,
    'n_sizes': n_sizes,
    'n_test': n_test,
    'n_val': n_val,
    'data_type': data_type,
    'data_descs': data_descs,
    'methods': methods,
    'compute_br': compute_br,
    'filename': filename,
    'n_jobs': n_jobs}

run(**run_params)
|
"""
Copyright 2013 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cafe.drivers.unittest.fixtures import BaseTestFixture
from cloudcafe.bare_metal.composites import BareMetalComposite
from cloudcafe.common.resources import ResourcePool
class BareMetalFixture(BaseTestFixture):
    """Test fixture wiring up bare-metal API clients plus helpers that
    create a chassis, node and port and register them for cleanup."""

    @classmethod
    def setUpClass(cls):
        super(BareMetalFixture, cls).setUpClass()
        cls.bare_metal = BareMetalComposite()
        cls.chassis_client = cls.bare_metal.chassis.client
        cls.drivers_client = cls.bare_metal.drivers.client
        cls.nodes_client = cls.bare_metal.nodes.client
        cls.ports_client = cls.bare_metal.ports.client
        # Pool of created resources, released when the class tears down.
        cls.resources = ResourcePool()
        cls.addClassCleanup(cls.resources.release)

    @classmethod
    def _create_chassis(cls):
        """Create a test chassis and register it for cleanup."""
        cls.chassis_description = 'test_chassis'
        cls.chassis_extra = {'key1': 'value1'}
        cls.create_chassis_resp = cls.chassis_client.create_chassis(
            description=cls.chassis_description, extra=cls.chassis_extra)
        # Guard-clause style, for consistency with _create_node/_create_port
        # (the original used an if/else here).
        if not cls.create_chassis_resp.ok:
            cls.assertClassSetupFailure("Unable to create a chassis.")
        cls.chassis = cls.create_chassis_resp.entity
        cls.resources.add(cls.chassis.uuid,
                          cls.chassis_client.delete_chassis)

    @classmethod
    def _create_node(cls):
        """Create a fake-driver node on the test chassis and register it for cleanup."""
        cls.node_driver = "fake"
        cls.node_properties = {'property1': 'value1'}
        cls.driver_info = {'info1': 'value2'}
        cls.node_extra = {'meta1': 'value3'}
        cls.create_node_resp = cls.nodes_client.create_node(
            chassis_uuid=cls.chassis.uuid,
            driver=cls.node_driver,
            properties=cls.node_properties,
            driver_info=cls.driver_info,
            extra=cls.node_extra)
        if not cls.create_node_resp.ok:
            cls.assertClassSetupFailure("Unable to create a node.")
        cls.node = cls.create_node_resp.entity
        cls.resources.add(cls.node.uuid,
                          cls.nodes_client.delete_node)

    @classmethod
    def _create_port(cls):
        """Create a port on the test node and register it for cleanup."""
        cls.mac_address = '5d:9a:1f:12:d5:0e'
        cls.port_extra = {'meta1': 'value1'}
        cls.create_port_resp = cls.ports_client.create_port(
            node_uuid=cls.node.uuid,
            address=cls.mac_address,
            extra=cls.port_extra)
        if not cls.create_port_resp.ok:
            cls.assertClassSetupFailure("Unable to create a port.")
        cls.port = cls.create_port_resp.entity
        cls.resources.add(cls.port.uuid,
                          cls.ports_client.delete_port)
|
import numpy as np
from coptim.optimizer import Optimizer
class GradientMethodExactMinimization(Optimizer):
    """Steepest descent with exact line search for a quadratic objective
    parameterized by a diagonal matrix Q (last entry ``delta``) and c."""

    def __init__(self):
        # TODO: More metrics: vector of x's, objective values, etc.
        self.iterations = 0

    def step_size(self, Q, c, x, d, func):
        """Exact minimizing step length along descent direction ``d``."""
        gradient = func.gradient(Q, c, x)
        return -gradient.T.dot(d) / d.T.dot(Q).dot(d)

    def optimize(self, x_0, func, delta, epsilon):
        """Run steepest descent from ``x_0`` until the gradient norm drops
        below ``epsilon``; returns the final iterate."""
        Q = np.diag([1, 1, 1, delta])
        c = np.array([1, 1, 1, 1])
        x = x_0
        while self.stopping_criteria(x, Q, c, func, epsilon):
            direction = -func.gradient(Q, c, x)
            alpha = self.step_size(Q, c, x, direction, func)
            # Move along the negative gradient by the exact step length.
            x = x + alpha * direction
            self.iterations += 1
        return x

    def stopping_criteria(self, x, Q, c, func, epsilon):
        """True while the gradient norm is still at least ``epsilon``."""
        return np.linalg.norm(func.gradient(Q, c, x)) >= epsilon
|
from keras.optimizers import Adam
class DiffusionVAEParams(object):
    """Hyper-parameter container for a diffusion VAE.

    Groups sampling, decoder, manifold and capacity-control settings and
    exposes them as a dict via params_to_dictionary().
    """

    def __init__(self,
                 steps=10,
                 truncation_radius=0.5,
                 var_x=1.0,
                 r_loss="mse",
                 d=2,
                 optimizer=None,
                 controlled_capacity=False,
                 min_capacity=0.0,
                 max_capacity=0.0
                 ):
        """Constructor.

        BUG FIX: the original default was ``optimizer=Adam()``, which is
        evaluated once at function-definition time, so every instance
        created without an explicit optimizer shared the same (stateful)
        Adam object. Default to None and create a fresh Adam per instance.
        """
        # Architecture parameters
        self.r_loss = r_loss
        self.optimizer = optimizer if optimizer is not None else Adam()
        # Sampling parameters
        self.steps = steps
        self.truncation_radius = truncation_radius
        # Decoder parameters
        self.var_x = var_x
        # Manifold parameters
        self.d = d
        # Controlled capacity (Understanding disentangling in beta-VAE, Burgess et al)
        self.controlled_capacity = controlled_capacity
        self.min_capacity = min_capacity
        self.max_capacity = max_capacity
        # Summary parameters
        self.params_dict = self.params_to_dictionary()

    def params_to_dictionary(self):
        """Return the (serializable) hyper-parameters as a dict."""
        dictionary = {"r_loss": self.r_loss,
                      "var_x": self.var_x,
                      "steps": self.steps,
                      "truncation_radius": self.truncation_radius,
                      "d": self.d,
                      "controlled_capacity": self.controlled_capacity,
                      "min_capacity": self.min_capacity,
                      "max_capacity": self.max_capacity}
        return dictionary
|
### Backend for testing
###Import packages
import sys, os, os.path, h5py, time, shutil
from os import walk
import pandas as pd
import xgboost as xgb
import numpy as np
import matplotlib.pyplot as plt
from shutil import copy
from sklearn.metrics import accuracy_score, f1_score
from scipy.optimize.minpack import leastsq
from diffpy.Structure import loadStructure
from diffpy.srfit.pdf import PDFContribution
from diffpy.srfit.fitbase import FitRecipe, FitResults
## From new fitting
#import numpy as np
#import matplotlib.pyplot as plt
#from scipy.optimize.minpack import leastsq
#from diffpy.Structure import loadStructure
from diffpy.Structure import Structure
from diffpy.Structure import Atom
from diffpy.Structure.expansion import supercell
from diffpy.srfit.pdf import PDFContribution, DebyePDFGenerator, PDFParser
from diffpy.srfit.fitbase import FitRecipe, FitResults,FitContribution
from ase.cluster.decahedron import Decahedron
from ase.io import write
from ase.cluster.icosahedron import Icosahedron
from ase.cluster.octahedron import Octahedron
from ase.cluster.wulff import wulff_construction
import time, ase, glob, pdb
from ase.cluster.cubic import FaceCenteredCubic, BodyCenteredCubic, SimpleCubic
from ase.lattice.cubic import Diamond
from ase.lattice.tetragonal import SimpleTetragonal, CenteredTetragonal
from ase.lattice.orthorhombic import SimpleOrthorhombic, BaseCenteredOrthorhombic, FaceCenteredOrthorhombic, BodyCenteredOrthorhombic
from ase.lattice.monoclinic import SimpleMonoclinic, BaseCenteredMonoclinic
from ase.lattice.triclinic import Triclinic
from ase.lattice.hexagonal import Hexagonal, HexagonalClosedPacked, Graphite
from diffpy.srfit.fitbase import Profile #tilføjet til backend
from bokeh.io import output_notebook, show
from bokeh.plotting import figure
from scipy.optimize.minpack import leastsq
from scipy.optimize import minimize
#import pandas as pd
from multiprocessing import Pool
def sort_filenames(xyz_path, print_level):
    """Group the xyz file names found in *xyz_path* by structure type.

    The first letter of each file name encodes its structure:
    S=SC, F=FCC, B=BCC, H=HCP, I=Icosahedron, D=Decahedron, O=Octahedron.
    Returns the seven lists in that fixed order; other names are ignored.
    """
    _, _, filenames = next(walk(xyz_path))  # all file names in the folder
    structure_letters = "SFBHIDO"
    buckets = {letter: [] for letter in structure_letters}
    for xyzfile in filenames:
        first = xyzfile[0]
        if first in buckets:
            buckets[first].append(xyzfile)
    sorted_filenames = [buckets[letter] for letter in structure_letters]
    if print_level == True:
        print("All xyz files have been sorted into lists")
    return sorted_filenames
def get_testing_backend(xyz_folder_name):
    """Return (flat list of all xyz file names, path to the xyz folder)."""
    xyz_path = "xyz_files/" + xyz_folder_name + "/"  # location of the xyz files
    nested = sort_filenames(xyz_path, print_level=False)
    # Flatten the per-structure lists into one list of file names.
    flat = []
    for sublist in nested:
        flat.extend(sublist)
    return flat, xyz_path
def load_PDFs(folder_name):
    """Load the train/val/test PDF datasets stored as HDF5 under PDF_datasets/.

    Returns (X_train, y_train, X_val, y_val, X_test, y_test), or None (with a
    message) when the requested folder does not exist.
    """
    pdf_path = "PDF_datasets/" + folder_name
    # Guard against a missing dataset folder before touching any files.
    if not os.path.isdir(pdf_path):
        print("This folder doesn't exist. Choose an existing folder.")
        return
    splits = []
    for split in ("X_train", "y_train", "X_val", "y_val", "X_test", "y_test"):
        splits.append(pd.read_hdf(pdf_path + "/" + split + ".h5", key='df'))
    return tuple(splits)
def test_model(X_test, y_test, model):
    """Evaluate *model* on the test set and print top-1/top-3/top-5 accuracies.

    BUG FIX / cleanup: the previous implementation sorted each probability row
    *in place* (mutating the caller's prediction array) and recovered class
    indices with ``list.index``, which picks the wrong class whenever two
    probabilities are equal. ``numpy.argsort`` avoids both problems, and the
    duplicated top-3/top-5 code is factored into ``_topk_accuracy``.
    """
    xgb_test = xgb.DMatrix(X_test, y_test)
    start_time = time.time()
    y_pred_proba_test = model.predict(xgb_test)
    stop_time = time.time()
    # Top-1 predicted class per sample.
    y_pred_test = [np.argmax(line) for line in y_pred_proba_test]
    top3accuracy = _topk_accuracy(y_test, y_pred_proba_test, 3)
    top5accuracy = _topk_accuracy(y_test, y_pred_proba_test, 5)
    print("Percent guessed structures in test set:", str(accuracy_score(y_test, y_pred_test)*100)[0:5], "%")
    print(f"Percent guessed structures in top3 in test set: {str(top3accuracy)[0:5]} %")
    print(f"Percent guessed structures in top5 in test set: {str(top5accuracy)[0:5]} %")
    print("Time spent on predicting with model:", str((stop_time-start_time)/60)[0:6], " min")
    return None


def _topk_accuracy(y_test, y_pred_proba, k):
    """Percentage of samples whose true label is among the k most probable classes."""
    correct = 0
    for i in range(len(y_test)):
        # Indices of the k highest probabilities for this sample.
        topk_classes = np.argsort(y_pred_proba[i])[-k:]
        if y_test[i] in topk_classes:
            correct += 1
    return 100 * correct / len(y_test)
def load_exp_pdf(exp_filename):
    """Load an experimental PDF from ExperimentalData/ and normalise its grid.

    The data is reshaped onto a 300-point r-grid with 0.1 Å spacing starting
    at r = 0 (zero-padded / decimated / clipped as needed), normalised to a
    maximum of 1, and the cleaned version is saved to ExperimentalData/Cleaned/.

    Returns (xgrid, normalised pdf, raw pdf), or None if the file is missing
    or cannot be parsed.
    """
    exp_path = "ExperimentalData/"
    if not os.path.isfile(exp_path + exp_filename):
        print("The file is not located in the /ExperimentalData folder")
        return None
    # The file may start with an unknown number of header lines; try skipping
    # an increasing number of rows until numpy can parse the remainder.
    label = None
    for skiprows in range(50):
        try:
            label = np.loadtxt(exp_path + exp_filename, skiprows=skiprows)
            break
        except ValueError:
            # BUG FIX: was a bare `except: pass`, which hid every error.
            # A header line is still present; skip one more row and retry.
            continue
    if label is None:
        # BUG FIX: previously `label` was left unbound here, raising
        # NameError instead of reporting the problem.
        print("File was not loaded")
        return None
    xgrid, xyz_pdf = label.T[0], label.T[1]
    if xgrid[0] != 0.0:
        # Pad the front with zeros so the grid starts at r = 0.
        pad = int(min(xgrid) / 0.01)
        xyz_pdf = np.append(np.zeros(pad), xyz_pdf)
        xgrid = np.append([n / 100 for n in range(pad)], xgrid)
    if xgrid[1] - xgrid[0] == 0.01:
        # Downsample a 0.01 Å grid to the 0.1 Å grid the model expects.
        xyz_pdf = xyz_pdf[::10]
        xgrid = xgrid[::10]
    # Clip to, then zero-pad up to, exactly 300 points.
    xyz_pdf = xyz_pdf[0:300]
    xgrid = xgrid[0:300]
    while len(xyz_pdf) < 300:
        xyz_pdf = np.append(xyz_pdf, np.array([0]))
        xgrid = np.append(xgrid, np.array([xgrid[-1] + 0.1]))
    xyz_pdf_raw = xyz_pdf
    xyz_pdf = xyz_pdf / max(xyz_pdf)
    # Persist the cleaned PDF so the fitting stage can reuse it later.
    data = np.column_stack([xgrid, xyz_pdf])
    datafile_path = "ExperimentalData/Cleaned/" + exp_filename
    np.savetxt(datafile_path, data, fmt=['%2.1f', '%2.5f'])
    return xgrid, xyz_pdf, xyz_pdf_raw
def plot_loaded_PDF(xgrid, xyz_pdf, xyz_pdf_raw, exp_filename):
    """Plot the normalised and raw experimental PDFs on the shared r grid."""
    plt.figure(figsize=(10, 5))
    curves = ((xyz_pdf, "Imported experimental PDF"),
              (xyz_pdf_raw, "Raw experimental PDF"))
    for curve, tag in curves:
        plt.plot(xgrid, curve, label=tag)
    plt.title(exp_filename)
    plt.xlabel("r [Å]")
    plt.ylabel("G(r) [a.u.]")
    plt.legend()
    plt.show()
    return None
def model_predict_cluster(xyz_path, xyz_pdf, exp_filename, model, sorted_filenames_flat, Qmin, Qmax, Qdamp):
    """
    Takes a PDF with 300 points, throws it into the model, prints the 5 best guesses.

    The five predicted xyz files are also copied into
    Results/Results_<exp name>/ ranked 1-5.
    Returns (indices of the top-5 guesses, their file names, their probabilities).
    """
    # Normalise the PDF and append the instrument parameters as extra features.
    xyz_pdf = xyz_pdf / max(xyz_pdf)
    xyz_pdf = np.append(xyz_pdf, [Qmin, Qmax, Qdamp])
    mad = xyz_pdf.reshape((1, -1))
    xgb_test = xgb.DMatrix(mad)
    percentages = model.predict(xgb_test)
    percentages_list = percentages[0].tolist()
    # The five largest class probabilities, highest first.
    top5list = sorted(percentages_list)[::-1][0:5]
    print(f"The model was given cluster: {exp_filename}")
    print(f"The model predicts the following:")
    indexn_list = []  # class indices of the top-5 guesses
    guess_filenames = []
    for n in range(len(top5list)):
        indexn = percentages_list.index(top5list[n])
        indexn_list.append(indexn)
        indexfilename = sorted_filenames_flat[indexn]
        guess_filenames.append(indexfilename)
        print(f"Prediction {n+1} with {str(top5list[n]*100)[0:4]} % is: {indexfilename}")
    # Make a directory for the results and copy over the guessed xyz files.
    results_dir = "Results/" + "Results_" + exp_filename[0:-3]
    os.makedirs(results_dir, exist_ok=True)
    # Cleanup: the five copy-pasted shutil.copy lines are now one loop.
    for rank, fname in enumerate(guess_filenames, start=1):
        shutil.copy(xyz_path + str(fname),
                    results_dir + "/" + str(rank) + "_guess_" + str(fname))
    print("The five predicted clusters have been saved to Results/" + str(exp_filename[0:-3]) + "/")
    return indexn_list, guess_filenames, top5list
def fit(Qmin, Qmax, Qdamp, cluster, PDFfile, plot):
    """Refine *cluster* against the PDF in *PDFfile* with diffpy-srfit.

    Builds a Debye PDF generator for the (non-periodic) cluster, then refines
    scale, an isotropic lattice zoom and a shared isotropic ADP in sequence
    with least squares. Optionally shows a bokeh plot of the fit.

    Returns (rfactor, r, g, gcalc, diff): the Rw value, the r grid, the
    measured PDF, the calculated PDF and the offset difference curve.
    """
    # Create a PDF contribution as before
    pdfprofile = Profile()
    pdfparser = PDFParser()
    pdfparser.parseFile(PDFfile)
    pdfprofile.loadParsedData(pdfparser)
    # Fit range: 1.5-20 Å with a 0.01 Å step.
    pdfprofile.setCalculationRange(xmin = 1.5, xmax = 20, dx=0.01)
    # Setup the PDFgenerator that calculates the PDF from the model
    #Generator for first cluster
    pdfgenerator = DebyePDFGenerator("G")
    pdfgenerator._calc.evaluatortype = 'OPTIMIZED'
    #Input the data files
    #cluster = loadStructure(stru)
    # periodic=False: treat the cluster as a finite particle (Debye equation).
    pdfgenerator.setStructure(cluster, periodic=False)
    # Add the profile and generator the the PDFcontribution
    pdfcontribution = FitContribution("pdf")
    pdfcontribution.setProfile(pdfprofile, xname="r")
    pdfcontribution.addProfileGenerator(pdfgenerator)
    pdfcontribution.setEquation("scale*G")
    # Moving on
    recipe = FitRecipe()
    recipe.addContribution(pdfcontribution)
    # Overall scale factor, restrained to stay positive.
    recipe.addVar(pdfcontribution.scale, 0.1, tag = "scale")
    recipe.restrain("scale", lb=0.1, ub=1e99, sig=0.001)
    # Instrument parameters supplied by the caller.
    pdfgenerator.qdamp.value = Qdamp
    pdfgenerator.setQmax(Qmax)
    pdfgenerator.setQmin(Qmin)
    # Add ADP for the cluster
    phase_molecule = pdfgenerator.phase
    atoms1 = phase_molecule.getScatterers()
    #Make latices to the two phases
    lat = phase_molecule.getLattice()
    #Make new variable zoomscale
    # One isotropic "zoomscale" stretches a, b and c together.
    recipe.newVar("zoomscale", 1.00, tag = "lat")
    recipe.constrain(lat.a, 'zoomscale')
    recipe.constrain(lat.b, 'zoomscale')
    recipe.constrain(lat.c, 'zoomscale')
    # We create the variables of ADP and assign the initial value to them. In this
    # example, we use isotropic ADP for all atoms
    Biso = recipe.newVar("Biso", value=0.2, tag = 'ADP')
    recipe.restrain(Biso, lb=0, ub=2, sig = 0.01)
    # For all atoms in the structure model, we constrain their Biso according to their species
    # NOTE(review): only atoms matching the FIRST element of the cluster get
    # the shared Biso — presumably the clusters are monoatomic; confirm.
    for atom in atoms1:
        if atom.element == cluster.element[0]:
            recipe.constrain(atom.Biso, Biso)
    recipe.clearFitHooks()
    # Tune PDF
    # Refine in stages: lattice zoom first, then scale, then ADP.
    recipe.fix("all")
    recipe.free("lat")
    leastsq(recipe.residual, recipe.getValues())
    recipe.free("scale")
    leastsq(recipe.residual, recipe.getValues())
    recipe.free("ADP")
    leastsq(recipe.residual, recipe.getValues())
    res = FitResults(recipe)
    rfactor = res.rw
    # All this should be pretty familiar by now.
    r = recipe.pdf.profile.x
    g = recipe.pdf.profile.y
    gcalc = recipe.pdf.profile.ycalc
    # Offset the difference curve below the data for plotting.
    diffzero = -0.8 * max(g) * np.ones_like(g)
    diff = g - gcalc + diffzero
    if plot:
        output_notebook()
        tools = "hover, box_zoom, undo, crosshair"
        p = figure(tools=tools, background_fill_color="darkgray")
        p.scatter(r,g,color='blue',legend_label="G(r) Data")
        p.line(r, gcalc,color='red',legend_label="G(r) Fit")
        p.line(r, diff,color='green',legend_label="G(r) diff")
        p.line(r, diffzero,color='black')
        show(p)
    res.printResults()
    return rfactor, r, g, gcalc, diff
def fit_top3(guess_filenames, xyz_path, exp_filename, Qmin, Qmax, Qdamp):
    """Refine the top three predicted clusters against the cleaned PDF.

    Each fit is plotted, and the three (g, gcalc, diff) curve sets are saved
    column-stacked to Results/Results_<exp name>/fit.txt as
    [r, g1, g2, g3, gcalc1, gcalc2, gcalc3, diff1, diff2, diff3].
    """
    PDFFile = "ExperimentalData/Cleaned/" + exp_filename
    ordinals = ("first", "second", "third")
    gs, gcalcs, diffs = [], [], []
    r = None
    for i in range(3):
        print("\nFit of the " + ordinals[i] + " prediction: " + str(guess_filenames[i]))
        cluster = loadStructure(xyz_path + guess_filenames[i])
        # BUG FIX: the Qdamp argument was previously ignored — every call
        # hardcoded Qdamp=0.03. Pass the caller's value through.
        rfactor, r, g, gcalc, diff = fit(Qmin, Qmax, Qdamp=Qdamp,
                                         cluster=cluster, PDFfile=PDFFile, plot=True)
        gs.append(g)
        gcalcs.append(gcalc)
        diffs.append(diff)
    # Save fits in the original column order.
    np.savetxt("Results/Results_" + exp_filename[0:-3] + "/fit.txt",
               np.column_stack([r] + gs + gcalcs + diffs))
    return None
|
from django.shortcuts import render
# Create your views here.
def index(request):
    """Render the dashboard landing page."""
    content = {
        'title': 'Dashboard',
        'author': 'Made with 💚 by Diaz Ardian',
        'logo': 'KACANG SERVER',
    }
    return render(request, 'index.html', content)
from __future__ import print_function
import logging
import numpy as np
import pandas as pd
import sys
import torch
def SaveModelResultsToCSV(MSE, MAE, r2_score, labels, predictions, name):
    """Stack per-epoch metric tensors and write them to ``<name>.csv``.

    MSE, MAE, r2_score: lists of 4-element tensors, one per epoch, holding
    the (x, y, z, phi) components of each metric.
    labels, predictions: currently unused; kept for interface compatibility
    (the original code for them was commented out).

    Cleanup: the twelve copy-pasted column-extraction stanzas (and eight
    unused accumulator lists) are replaced by one helper.
    """
    def _columns(metric):
        # Stack the per-epoch tensors into (epochs, 4) and split into
        # x / y / z / phi numpy columns.
        stacked = torch.stack(metric, 0)
        return [stacked[:, i].cpu().numpy() for i in range(4)]

    r2_x, r2_y, r2_z, r2_phi = _columns(r2_score)
    mse_x, mse_y, mse_z, mse_phi = _columns(MSE)
    mae_x, mae_y, mae_z, mae_phi = _columns(MAE)
    df = pd.DataFrame(
        data={'MSE_x': mse_x, 'MSE_y': mse_y, 'MSE_z': mse_z, 'MSE_phi': mse_phi,
              'MAE_x': mae_x, 'MAE_y': mae_y, 'MAE_z': mae_z, 'MAE_phi': mae_phi,
              'r2_score_x': r2_x, 'r2_score_y': r2_y, 'r2_score_z': r2_z, 'r2_score_phi': r2_phi})
    df.index.name = "epochs"
    df.to_csv(name + ".csv", header=True)
|
import argparse
import os
# workaround to unpickle olf model files
import sys
import time
import numpy as np
import torch
from sevn_model.envs import VecPyTorch, make_vec_envs
from sevn_model.utils import get_render_func, get_vec_normalize
sys.path.append('sevn_model')
# Command-line interface for the policy evaluation/visualisation script.
parser = argparse.ArgumentParser(description='RL')
parser.add_argument(
    '--seed', type=int, default=1, help='random seed (default: 1)')
parser.add_argument(
    '--log-interval',
    type=int,
    default=10,
    help='log interval, one log per n updates (default: 10)')
parser.add_argument(
    '--env-name',
    default='PongNoFrameskip-v4',
    help='environment to train on (default: PongNoFrameskip-v4)')
parser.add_argument(
    '--load-dir',
    default=None,
    help='directory to save agent logs (default: ./trained_models/)')
parser.add_argument(
    '--load-model',
    default='./trained_models/ppo/0/SEVN-Mini-All-Shaped-v1.pt',
    help='a path to a particular model')
parser.add_argument(
    '--custom-gym',
    default='SEVN_gym',
    help='The gym to load from')
parser.add_argument(
    '--non-det',
    action='store_true',
    default=False,
    help='whether to use a non-deterministic policy')
args = parser.parse_args()
# Deterministic policy unless --non-det was given.
args.det = not args.non_det
# Build a single evaluation environment on the CPU.
env = make_vec_envs(
    args.env_name,
    args.seed + 1000,
    1,
    None,
    None,
    device='cpu',
    custom_gym=args.custom_gym,
    allow_early_resets=False)
# Get a render function
render_func = get_render_func(env)
# We need to use the same statistics for normalization as used in training
# Load the trained actor-critic and observation-normalisation stats, either
# from a directory (--load-dir) or from an explicit checkpoint (--load-model).
if args.load_dir is not None:
    actor_critic, ob_rms = \
        torch.load(os.path.join(args.load_dir, args.env_name + ".pt"), map_location='cpu')
else:
    actor_critic, ob_rms = \
        torch.load(args.load_model, map_location='cpu')
vec_norm = get_vec_normalize(env)
if vec_norm is not None:
    # Freeze normalisation statistics at their training values.
    vec_norm.eval()
    vec_norm.ob_rms = ob_rms
# Recurrent state and episode-continuation mask for the policy.
recurrent_hidden_states = torch.zeros(1,
                                      actor_critic.recurrent_hidden_state_size)
masks = torch.zeros(1, 1)
obs = env.reset()
render_func('rgb_array', clear=True, first_time=True)
# Run episodes forever; each episode is capped at 300 steps.
while True:
    i = 0
    done = False
    start = time.time()
    r = 0
    while i < 300 and not done:
        print(i)
        with torch.no_grad():
            value, action, _, recurrent_hidden_states = actor_critic.act(
                obs, recurrent_hidden_states, masks, deterministic=args.det)
        # Obser reward and next obs
        obs, reward, done, _ = env.step(action)
        r += reward
        print("reward: " + str(r))
        print(f"action: {action}, reward: {reward}, done: {done}")
        # Zero the mask on episode end so the recurrent state resets.
        masks.fill_(0.0 if done else 1.0)
        print("acted: " + str(time.time() - start))
        start = time.time()
        if render_func is not None:
            render_func('rgb_array', clear=False, first_time=False)
        print("rendered: " + str(time.time() - start))
        i += 1
    # Clear the rendering before the next episode starts.
    render_func('rgb_array', clear=True, first_time=False)
|
# Application settings: run in debug mode and listen on port 8080.
DEBUG = True
PORT = 8080
# Example/placeholder settings.
EXAMPLE_FOO = 'foo'
EXAMPLE_BAR = 'bar'
|
import pytest
import numpy
from thinc.types import Ragged
from thinc.api import NumpyOps
from ..data_classes import WordpieceBatch
from ..truncate import _truncate_tokens, _truncate_alignment
@pytest.fixture
def sequences():
    """Per-sequence token lengths; each entry is wordpieces per token."""
    seqs = [
        [1, 3, 1],  # 5 wordpieces in this sequence
        [3, 7, 1, 1],  # 12
        [1],  # 1
        [20, 1],  # 21
    ]
    return seqs
@pytest.fixture
def shape(sequences):
    """Shape of input_ids including padding: (n sequences, longest wordpiece count)."""
    longest = max(sum(lengths) for lengths in sequences)
    return (len(sequences), longest)
@pytest.fixture
def seq_lengths(sequences):
    """Total wordpiece count per sequence as an int32 array."""
    totals = [sum(seq) for seq in sequences]
    return numpy.array(totals, dtype="i")
@pytest.fixture
def wordpieces(sequences):
    """A WordpieceBatch whose strings mirror the fixture token lengths."""
    strings = []
    for token_lengths in sequences:
        pieces = []
        for length in token_lengths:
            pieces.extend(str(i) for i in range(length))
        strings.append(pieces)
    n_rows = len(strings)
    n_cols = max(len(seq) for seq in strings)
    return WordpieceBatch(
        strings=strings,
        input_ids=numpy.zeros((n_rows, n_cols), dtype="i"),
        token_type_ids=numpy.zeros((n_rows, n_cols), dtype="i"),
        attention_mask=numpy.zeros((n_rows, n_cols), dtype="bool"),
        lengths=[len(seq) for seq in strings],
    )
@pytest.fixture
def align(sequences):
    """Ragged alignment mapping each token to its wordpiece indices."""
    lengths = []
    indices = []
    offset = 0
    for seq in sequences:
        for token_length in seq:
            lengths.append(token_length)
            indices.extend(range(offset, offset + token_length))
            offset += token_length
    return Ragged(numpy.array(indices, dtype="i"), numpy.array(lengths, dtype="i"))
@pytest.fixture
def max_length():
    # Wordpiece budget per sequence used by the truncation tests below.
    return 6
@pytest.fixture
def mask_from_end(shape, max_length):
    """Boolean mask keeping only the first max_length wordpieces of each row."""
    n_seq, length = shape
    keep = numpy.arange(length) < max_length
    return numpy.concatenate([keep] * n_seq)
def test_truncate_wordpieces(wordpieces, max_length, mask_from_end):
    """Truncation caps every per-sequence array at max_length wordpieces."""
    truncated = _truncate_tokens(wordpieces, mask_from_end)
    for i, seq in enumerate(truncated.strings):
        assert len(seq) <= max_length
        assert seq == wordpieces.strings[i][:max_length]
        for arr in (truncated.input_ids[i],
                    truncated.token_type_ids[i],
                    truncated.attention_mask[i]):
            assert arr.shape[0] <= max_length
def test_truncate_alignment_from_end(sequences, max_length, align, mask_from_end):
    """After truncation, each sequence's alignment covers at most max_length
    wordpieces, and lengths only change at the end of a sequence."""
    truncated = _truncate_alignment(align, mask_from_end)
    # Check that the number of tokens hasn't changed. We still need to have
    # alignment for every token, even one that now maps to zero wordpieces.
    assert truncated.lengths.shape[0] == align.lengths.shape[0]
    start = 0
    for seq in sequences:
        end = start + len(seq)
        # Get the alignment for this sequence of tokens. Each length in the
        # alignment indicates the number of wordpiece tokens, so we need to
        # check that the sum of the lengths doesn't exceed the maximum.
        wp_indices = truncated[start:end]
        assert wp_indices.lengths.sum() <= max_length
        # We're truncating from the end, so we shouldn't see different values
        # except at the end of the sequence: once a token's alignment is
        # emptied, every later token must be empty too, and earlier tokens
        # keep their original lengths.
        seen_zero = False
        before = align[start:end]
        for length_now, length_before in zip(wp_indices.lengths, before.lengths):
            if seen_zero:
                assert length_now == 0, wp_indices.lengths
            elif length_now == 0:
                seen_zero = True
            else:
                # BUG FIX: this comparison was a bare expression (no assert),
                # so it never actually checked anything.
                assert length_now == length_before
        # BUG FIX: `start` was never advanced, so every iteration compared
        # the slice [0:len(seq)] instead of each sequence's own span.
        start = end
|
import json
from flask import Blueprint, Response
from rentomatic.repository import memrepo as mr
from rentomatic.use_cases import room_list_use_case as uc
from rentomatic.serializers import room_json_serializer as ser
# Flask blueprint exposing the room-listing endpoint.
blueprint = Blueprint('room', __name__)
# Hard-coded sample rooms used to seed the in-memory repository.
room1 = {
    'code': 'f853578c-fc0f-4e65-81b8-566c5dffa35a',
    'size': 215,
    'price': 39,
    'longitude': -0.09998975,
    'latitude': 51.75436293,
}
room2 = {
    'code': 'fe2c3195-aeff-487a-a08f-e0bdc0ec6e9a',
    'size': 405,
    'price': 66,
    'longitude': 0.18228006,
    'latitude': 51.74640997,
}
room3 = {
    'code': '913694c6-435a-4366-ba0d-da5334a611b2',
    'size': 56,
    'price': 60,
    'longitude': 0.27891577,
    'latitude': 51.45994069,
}
@blueprint.route('/rooms', methods=['GET'])
def room():
    """Return every stored room as a JSON list."""
    use_case = uc.RoomListUseCase(mr.MemRepo([room1, room2, room3]))
    payload = json.dumps(use_case.execute(), cls=ser.RoomJsonEncoder)
    return Response(payload, mimetype='application/json', status=200)
|
from homeworks.aleksey_gukov.hw05 import level04
def test():
    """host() extracts the bare host name from assorted URL formats."""
    cases = {
        "test.com/a/b/c": "test.com",
        "https://github.com/tgrx/Z22/": "github.com",
        "git@github.com:tgrx/Z22.git": "github.com",
    }
    for url, expected in cases.items():
        assert level04.host(url) == expected
    # A bare path has no host: the result is falsy and exactly "".
    assert not level04.host("/a/b")
    assert level04.host("/a/b") == ""
|
import os
import sys
import time
import argparse
import subprocess as sp
script_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(script_dir, "modules"))
from cparser import Parser
from scanner import Scanner, SymbolTableManager
from semantic_analyser import SemanticAnalyser
from code_gen import CodeGen, MemoryManager
# Maximal virtual memory for compiled program process (in bytes).
MAX_VIRTUAL_MEMORY = 50 * 1024 * 1024 # 50 MB
def limit_virtual_memory():
    """Cap this process's address space (used as a subprocess preexec_fn)."""
    # resource is POSIX-only; import lazily so the module still loads on Windows.
    import resource
    limits = (MAX_VIRTUAL_MEMORY, MAX_VIRTUAL_MEMORY)
    resource.setrlimit(resource.RLIMIT_AS, limits)
def compile(args):
    # NOTE: shadows the builtin `compile`; kept for CLI compatibility.
    """Compile the given C source file and optionally run the result.

    args is the argparse namespace: source_file plus the run/verbose/
    error_files/abstract_syntax_tree/symbol_table/tokens flags.
    """
    print("Compiling", args.source_file)
    # Reset global compiler state so repeated invocations start clean.
    SymbolTableManager.init()
    MemoryManager.init()
    parser = Parser(args.source_file)
    start = time.time()
    parser.parse()
    stop = time.time() - start
    print(f"Compilation took {stop:.6f} s")
    if not SymbolTableManager.error_flag:
        print("Compilation successful!")
    else:
        # Report all collected error categories.
        print("Compilation failed due to the following errors:\n")
        print(parser.scanner.lexical_errors)
        print(parser.syntax_errors)
        print(parser.semantic_analyzer.semantic_errors)
    # Optional artifact dumps requested via CLI flags.
    if args.abstract_syntax_tree:
        parser.save_parse_tree()
    if args.symbol_table:
        parser.scanner.save_symbol_table()
    if args.tokens:
        parser.scanner.save_tokens()
    if args.error_files:
        parser.save_syntax_errors()
        parser.scanner.save_lexical_errors()
        parser.semantic_analyzer.save_semantic_errors()
    parser.code_generator.save_output()
    if args.run and not SymbolTableManager.error_flag:
        print("Executing compiled program")
        # Pick the bundled interpreter binary for the current platform.
        if os.name == "nt":
            tester_file = os.path.join(script_dir, "interpreter", "tester_Windows.exe")
        elif os.name == "posix":
            tester_file = os.path.join(script_dir, "interpreter", "tester_Linux.out")
        else:
            tester_file = os.path.join(script_dir, "interpreter", "tester_Mac.out")
        output_file = os.path.join(script_dir, "output", "output.txt")
        output_dir = os.path.dirname(output_file)
        if os.path.exists(output_file):
            # Limit the child's memory on POSIX; not supported on Windows.
            preexec_fn = limit_virtual_memory if os.name != "nt" else None
            stderr = sp.PIPE if not args.verbose else None
            start = time.time()
            try:
                tester_output = sp.check_output(tester_file, cwd=output_dir,
                                                stderr=stderr, timeout=10,
                                                preexec_fn=preexec_fn).decode("utf-8")
            except sp.TimeoutExpired:
                print("RuntimeError: Execution timed out!")
            else:
                if not args.verbose:
                    # Keep only the interpreter's PRINT lines, stripped of the tag.
                    tester_output = "\n".join([line.replace("PRINT", "").strip()
                                               for line in tester_output.splitlines()
                                               if line.startswith("PRINT")])
                stop = time.time() - start
                print(f"Execution took {stop:.6f} s")
                print("Program output:")
                print(tester_output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Simple C Compiler written in Python')
    parser.add_argument("source_file", help="Path to C source file.")
    parser.add_argument('-r', '--run', action='store_true', help='Run the output program after compilation.')
    parser.add_argument('-v', '--verbose', action='store_true', help='Print all used three address codes.')
    parser.add_argument('-ef', '--error-files', action='store_true', help='Save compilation errors to text files.')
    parser.add_argument('-ast', '--abstract-syntax-tree', action='store_true', help='Save abstract syntax tree into a text file.')
    parser.add_argument('-st', '--symbol-table', action='store_true', help='Save symbol table into a text file.')
    parser.add_argument('-t', '--tokens', action='store_true', help='Save lexed tokens into a text file.')
    args = parser.parse_args()
    # BUG FIX: a relative source path was previously replaced with
    # `os.path.abspath(script_dir)` (dropping the file name entirely), and a
    # second parse_args() call then discarded even that. Resolve relative
    # paths against the script directory instead, and parse only once.
    if not os.path.isabs(args.source_file):
        args.source_file = os.path.join(script_dir, args.source_file)
    compile(args)
|
# Copyright (c) Xavier Figueroa
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import xmltodict
from spdx.parsers import jsonyamlxml
class Parser(jsonyamlxml.Parser):
    """
    Wrapper class for jsonyamlxml.Parser to provide an interface similar to
    RDF and TV Parser classes (i.e., spdx.parsers.<format name>.Parser) for XML parser.
    It also avoids to repeat jsonyamlxml.Parser.parse code for JSON, YAML and XML parsers
    """

    def __init__(self, builder, logger):
        super(Parser, self).__init__(builder, logger)
        # Fields that must always be parsed into lists, even when the XML
        # contains just a single element of that name.
        self.LIST_LIKE_FIELDS = {
            "creators",
            "externalDocumentRefs",
            "extractedLicenseInfos",
            "seeAlso",
            "annotations",
            "relationships",
            "snippets",
            "licenseInfoFromSnippet",
            "reviewers",
            "fileTypes",
            "licenseInfoFromFiles",
            "artifactOf",
            "fileContributors",
            "fileDependencies",
            "excludedFilesNames",
            "files",
            "documentDescribes",
        }

    def parse(self, file):
        """Parse the XML document in *file* and delegate to the shared parser."""
        raw = file.read()
        parsed_xml = xmltodict.parse(raw, strip_whitespace=False, encoding="utf-8")
        fixed = self._set_in_list(parsed_xml, self.LIST_LIKE_FIELDS)
        self.document_object = fixed.get("SpdxDocument").get("Document")
        return super(Parser, self).parse()

    def _set_in_list(self, data, keys):
        """
        xmltodict parse list-like fields in different way when there is only one
        of them than when there are several of them.
        Set in lists those fields that are expected to be in them.
        """
        if isinstance(data, (dict, OrderedDict)):
            fixed = OrderedDict()
            for key, value in data.items():
                wrapped = self._set_in_list(value, keys)
                if key in keys and not isinstance(value, list):
                    fixed[key] = [wrapped]
                else:
                    fixed[key] = wrapped
            return fixed
        if isinstance(data, list):
            return [self._set_in_list(item, keys) for item in data]
        return data
|
"""An example of how to use IPython1 for plotting remote parallel data
The two files plotting_frontend.ipy and plotting_backend.py go together.
To run this example, first start the IPython controller and 4
engines::
ipcluster -n 4
Then start ipython in pylab mode::
ipython -pylab
Then a simple "run plotting_frontend.ipy" in IPython will run the
example. When this is done, all the variables (such as number, downx, etc.)
are available in IPython, so for example you can make additional plots.
"""
import numpy as N
from pylab import *
from IPython.kernel import client
# NOTE: legacy Python 2 script (print statements) using the old
# IPython.kernel parallel API.
# Get an IPython1 client
rc = client.MultiEngineClient()
rc.get_ids()
# Run the simulation on all the engines
rc.run('plotting_backend.py')
# Bring back the data
# pull returns the per-engine values; gather concatenates arrays.
number = rc.pull('number')
d_number = rc.pull('d_number')
downx = rc.gather('downx')
downy = rc.gather('downy')
downpx = rc.gather('downpx')
downpy = rc.gather('downpy')
print "number: ", sum(number)
print "downsampled number: ", sum(d_number)
# Make a scatter plot of the gathered data
# These calls to matplotlib could be replaced by calls to pygist or
# another plotting package.
figure(1)
scatter(downx, downy)
xlabel('x')
ylabel('y')
figure(2)
scatter(downpx, downpy)
xlabel('px')
ylabel('py')
show()
from __future__ import absolute_import
from __future__ import unicode_literals
from git_code_debt.list_metrics import color
from git_code_debt.list_metrics import CYAN
from git_code_debt.list_metrics import main
from git_code_debt.list_metrics import NORMAL
def test_list_metrics_smoke(capsys):
    """Smoke test: running with no arguments must not crash and must print."""
    main([])
    captured = capsys.readouterr()
    assert captured.out
def test_list_metrics_no_color_smoke(capsys):
    """With --color never, no ANSI escape codes appear on either stream."""
    main(['--color', 'never'])
    captured = capsys.readouterr()
    assert '\033' not in captured.out
    assert '\033' not in captured.err
def test_color_no_color():
    """When coloring is disabled the text passes through unchanged."""
    assert color('foo', 'bar', False) == 'foo'
def test_colored():
    """When coloring is enabled the text is wrapped in color and reset codes."""
    assert color('foo', CYAN, True) == CYAN + 'foo' + NORMAL
|
# Form / Checkbox
# Use checkboxes to switch between two mutually exclusive options.
# ---
from h2o_wave import main, app, Q, ui
@app('/demo')
async def serve(q: Q):
    """Checkbox demo: show a form of checkbox variants; after Submit,
    echo each submitted value back and offer a Back button."""
    if not q.args.show_inputs:
        # First visit (or Back pressed): render the form.
        q.page['example'] = ui.form_card(box='1 1 4 10', items=[
            ui.checkbox(name='checkbox_unchecked', label='Not checked'),
            ui.checkbox(name='checkbox_checked', label='Checked', value=True),
            ui.checkbox(name='checkbox_indeterminate', label='Indeterminate', indeterminate=True),
            ui.checkbox(name='checkbox_unchecked_disabled', label='Not checked (Disabled)', disabled=True),
            ui.checkbox(name='checkbox_checked_disabled', label='Checked (Disabled)', value=True, disabled=True),
            ui.checkbox(name='checkbox_indeterminate_disabled', label='Indeterminate (Disabled)', indeterminate=True,
                        disabled=True),
            ui.button(name='show_inputs', label='Submit', primary=True),
        ])
    else:
        # Submit pressed: echo the captured values.
        q.page['example'].items = [
            ui.text(f'checkbox_unchecked={q.args.checkbox_unchecked}'),
            ui.text(f'checkbox_checked={q.args.checkbox_checked}'),
            ui.text(f'checkbox_indeterminate={q.args.checkbox_indeterminate}'),
            ui.text(f'checkbox_unchecked_disabled={q.args.checkbox_unchecked_disabled}'),
            ui.text(f'checkbox_checked_disabled={q.args.checkbox_checked_disabled}'),
            ui.text(f'checkbox_indeterminate_disabled={q.args.checkbox_indeterminate_disabled}'),
            ui.button(name='show_form', label='Back', primary=True),
        ]
    await q.page.save()
|
# import pydrakeik first. This is a workaround for the issue:
# https://github.com/RobotLocomotion/director/issues/467
from director import pydrakeik
import director
from director import robotsystem
from director.consoleapp import ConsoleApp
from director import transformUtils
from director import robotstate
from director import ikplanner
from director import visualization as vis
from director import objectmodel as om
import numpy as np
import time
import itertools
def onMatlabStartup(ikServer, startSuccess):
    # Callback registered on the MATLAB ik server's startup-completed
    # signal (see script bottom); aborts if startup failed, otherwise
    # kicks off the sampling test.
    assert startSuccess
    runTest()
def computeIk(goalFrame, constraints, ikParameters, seedPoseName, nominalPoseName):
    """Point the last two constraints at goalFrame and solve IK.

    Assumes constraints[-2] carries a referenceFrame (position) and
    constraints[-1] a quaternion (orientation) -- TODO confirm with the
    constraint construction in runTest(). Note ikParameters is accepted
    but not used here. Returns the (endPose, info) pair from runIk().
    """
    positionConstraint = constraints[-2]
    orientationConstraint = constraints[-1]
    positionConstraint.referenceFrame = goalFrame
    orientationConstraint.quaternion = goalFrame
    constraintSet = ikplanner.ConstraintSet(robotSystem.ikPlanner, constraints, '', '')
    constraintSet.seedPoseName = seedPoseName
    constraintSet.nominalPoseName = nominalPoseName
    return constraintSet.runIk()
def runTest():
side = 'left'
testTolerances = False
renderAllSamples = True
randomizeSamples = True
samplesPerJoint = 10
jointLimitPadding = np.radians(5)
ikPlanner = robotSystem.ikPlanner
jointController = robotSystem.robotStateJointController
robotModel = robotSystem.robotStateModel
if app.getTestingEnabled():
samplesPerJoint = 2
jointGroup = str('%s Arm' % side).title()
jointNames = ikPlanner.getJointGroup(jointGroup)
jointIndices = [robotstate.getDrakePoseJointNames().index(name) for name in jointNames]
jointLimits = np.array([robotModel.model.getJointLimits(jointName) for jointName in jointNames])
otherJoints = [name for name in robotstate.getDrakePoseJointNames() if name not in jointNames]
jointSamples = []
for name, limit in zip(jointNames, jointLimits):
jointMin = limit[0] + jointLimitPadding
jointMax = limit[1] - jointLimitPadding
samples, spacing = np.linspace(jointMin, jointMax, samplesPerJoint, retstep=True)
jointSamples.append(samples)
print 'joint name:', name
print 'joint range: [%.4f, %.4f]' % (limit[0], limit[1])
print 'joint number of samples:', samplesPerJoint
print 'joint sample spacing: %.4f' % spacing
totalSamples = np.product([len(x) for x in jointSamples])
print 'total number of samples:', totalSamples
allSamples = list(itertools.product(*jointSamples))
if randomizeSamples:
np.random.shuffle(allSamples)
if 'endEffectorConfig' in robotSystem.directorConfig:
linkName = robotSystem.directorConfig['endEffectorConfig']['endEffectorLinkNames'][0]
else:
linkName = ikPlanner.getHandLink(side)
linkFrame = robotModel.getLinkFrame(linkName)
constraints = []
constraints.append(ikPlanner.createPostureConstraint('q_nom', otherJoints))
constraints.extend(ikPlanner.createSixDofLinkConstraints(jointController.q, linkName))
def setTolerance(distance, angleInDegrees):
constraints[-1].angleToleranceInDegrees = angleInDegrees
constraints[-2].upperBound = np.ones(3)*distance
constraints[-2].lowerBound = np.ones(3)*-distance
setTolerance(0.005, 0.5)
ikParameters = ikplanner.IkParameters()
ikParameters.setToDefaults()
ikParameters.majorIterationsLimit = 10000
ikParameters.majorOptimalityTolerance = 1e-4
ikParameters.majorFeasibilityTolerance = 1e-6
#seedPoseName = 'q_nom'
#nominalPoseName = 'q_nom'
seedPoseName = 'sample_pose'
nominalPoseName = 'sample_pose'
print
print 'constraints:'
print
print constraints[-2]
print
print constraints[-1]
print
print ikParameters
print
print 'seed pose name:', seedPoseName
print 'nominal pose name:', nominalPoseName
print
ikPlanner.addPose(jointController.q, 'sample_pose')
endPose, info = computeIk(linkFrame, constraints, ikParameters, seedPoseName, nominalPoseName)
assert info == 1
assert np.allclose(endPose, jointController.q)
q = jointController.q.copy()
nom_sample = q[jointIndices].copy()
sampleCount = 0
totalSampleCount = 0
badSampleCount = 0
sampleL2NormAccum = 0.0
startTime = time.time()
for sample in allSamples:
sampleCount += 1
totalSampleCount += 1
dist = np.linalg.norm(sample - nom_sample)
sampleL2NormAccum += dist
q[jointIndices] = sample
jointController.setPose('sample_pose', q)
ikPlanner.addPose(q, 'sample_pose')
if renderAllSamples:
view.forceRender()
targetFrame = robotModel.getLinkFrame(linkName)
#pos, quat = transformUtils.poseFromTransform(frame)
endPose, info = computeIk(targetFrame, constraints, ikParameters, seedPoseName, nominalPoseName)
if info >= 10:
print
print 'bad info:', info
jointController.addPose('bad', endPose)
print 'sample num:', totalSampleCount
print 'sample:', sample
print
badSampleCount += 1
errorRate = badSampleCount/float(totalSampleCount)
print 'error rate: %.2f' % errorRate
print 'avg pose l2 norm:', sampleL2NormAccum/totalSampleCount
if testTolerances:
succeeded = False
for tol in [(0.01, 1), (0.01, 2), (0.02, 2), (0.02, 3), (0.03, 3), (0.03, 5), (0.04, 5), (0.05, 5), (0.1, 10), (0.2, 20)]:
print 'retry tolerance:', tol
setTolerance(tol[0], tol[1])
endPose, info = computeIk(frame, constraints, ikParameters, seedPoseName, nominalPoseName)
if info < 10:
succeeded = True
print 'Worked!'
break
setTolerance(0.005, 0.5)
if not succeeded:
print 'Giving up after retries.'
continue
timeNow = time.time()
elapsed = timeNow - startTime
if elapsed > 1.0:
view.forceRender()
print '%d samples/sec' % (sampleCount / elapsed), '%d total samples' % totalSampleCount
startTime = timeNow
sampleCount = 0
if app.getTestingEnabled():
assert badSampleCount == 0
app.quit()
# --- Script setup: build the console app, a 3D view, and the robot system ---
app = ConsoleApp()
app.setupGlobals(globals())
view = app.createView()
view.show()
robotSystem = robotsystem.create(view, planningOnly=True)
view.resetCamera()
# pydrake planning runs in-process, so the test can run immediately;
# matlabdrake needs the remote ik server started first, with the test
# launched from its startup callback.
if robotSystem.ikPlanner.planningMode == 'pydrake':
    robotSystem.ikPlanner.plannerPub._setupLocalServer()
    runTest()
elif robotSystem.ikPlanner.planningMode == 'matlabdrake':
    robotSystem.ikServer.connectStartupCompleted(onMatlabStartup)
    robotSystem.startIkServer()
    app.start(enableAutomaticQuit=False)
    # after the app starts, runTest() will be called by onMatlabStartup
|
#!/usr/bin/python
import argparse, sys, re, random, os, gzip
from multiprocessing import Pool, Lock, cpu_count, Queue
from subprocess import Popen, PIPE
import SimulationBasics
from VCFBasics import VCF
from SequenceBasics import read_fasta_into_hash
from TranscriptomeBasics import Transcriptome
from GenePredBasics import GenePredEntry
from RangeBasics import Bed,Locus,Loci
from FASTQPrecomputedProfileBasics import default_illumina_1_9 as default_illumina, default_pacbio_ccs95, default_pacbio_subreads
import FASTQBasics
# Module-level state shared between the worker-pool callbacks below.
write_lock = Lock()  # serializes writes to the shared output handles
gcounter = 0  # running count of reads written, for the stderr progress display
shand1 = None #handles for writing out short reads
shand2 = None
emissions_reports = []  # per-buffer emission reports (each wrapped in a Queue)
def main():
  """Command-line driver: build (or load) a RandomBiallelicTranscriptomeEmitter,
  then simulate paired-end short reads, PacBio ccs95 long reads and PacBio
  subreads from it, writing gzipped fastq files and per-transcript emission
  reports into the output directory."""
  parser = argparse.ArgumentParser(description="Create a simulated RNA-seq dataset")
  group0 = parser.add_mutually_exclusive_group(required=True)
  group0.add_argument('--load_biallelic_transcriptome',help="SERIALIZED BIALLELIC TRANSCRIOTOME EMITTER FILE to load up and use instead of all other file inputs")
  group0.add_argument('--inputs',nargs=3,help="<reference_genome> <phased_VCF> <transcripts_genepred>")
  #parser.add_argument('reference_genome',help="The reference genome.")
  #parser.add_argument('phased_VCF',help="A phased VCF file. If you are simulating the genomes that step can make on of these for you.")
  #parser.add_argument('transcripts_genepred',help="A genepred file describing the transcripts. Each transcript name must be unique.")
  group = parser.add_mutually_exclusive_group()
  group.add_argument('--uniform_expression',action='store_true',help="Uniform distribution of transcript expression")
  group.add_argument('--isoform_expression',help="The transcript expression in TSV format <Transcript name> tab <Expression>")
  group.add_argument('--cufflinks_isoform_expression',help="The expression of the isoforms or - for a uniform distribution of transcript expression")
  group2 = parser.add_mutually_exclusive_group()
  group2.add_argument('--ASE_identical',type=float,help="The ASE for the transcriptome, every isoform will have the same allele preference.")
  group2.add_argument('--ASE_isoform_random',action='store_true',help="The ASE will be random for every isoform.")
  group2.add_argument('--ASE_locus_random',action='store_true',help="The ASE will be randomly assigned for each locus")
  parser.add_argument('--short_read_count',type=int,default=10000,help="INT number of short reads")
  parser.add_argument('--short_read_length',type=int,default=101,help="INT length of the short reads")
  parser.add_argument('--long_read_ccs_count',type=int,default=4000,help="INT default number of long reads")
  parser.add_argument('--long_read_subread_count',type=int,default=4000,help="INT default number of long reads")
  parser.add_argument('--no_errors',action='store_true',help="Do not simulate errors in reads")
  parser.add_argument('--threads',type=int,default=cpu_count(),help="Number of threads defaults to cpu_count()")
  parser.add_argument('--locus_by_gene_name',action='store_true',help="Faster than the complete calculation for overlapping loci.")
  parser.add_argument('--seed',type=int,help="seed to make transcriptome and rho creation deterministic. Reads are still random, its just the transcriptome and rho that become determinisitic.")
  group3 = parser.add_mutually_exclusive_group(required=True)
  group3.add_argument('--output',help="Directory name for output")
  group3.add_argument('--save_biallelic_transcriptome',help="FILENAME output the biallelic transcriptome used to this file and then exit")
  parser.add_argument('--starting_read_multiplier',type=int,default=0,help="Used if outputting different reads from object, and you want them number differently give each different set values 0, 1, 2, etc...")
  args = parser.parse_args()

  # Error profiles are only constructed when errors will be simulated.
  fq_prof_illumina = None
  fq_prof_pacbio_ccs95 = None
  fq_prof_pacbio_subreads = None
  if not args.no_errors:
    fq_prof_illumina = default_illumina()
    fq_prof_pacbio_ccs95 = default_pacbio_ccs95()
    fq_prof_pacbio_subreads = default_pacbio_subreads()

  # Build the emitter from the raw inputs, or restore a serialized one.
  rbe = None
  if not args.load_biallelic_transcriptome:
    # we need to establish the emitter based on some known data
    rbe = load_from_inputs(args)
  else:
    rbe = SimulationBasics.RandomBiallelicTranscriptomeEmitter()
    inf = open(args.load_biallelic_transcriptome)
    sline = inf.readline().rstrip()
    inf.close()
    rbe.read_serialized(sline)
  if args.save_biallelic_transcriptome:
    ofser = open(args.save_biallelic_transcriptome,'w')
    ofser.write(rbe.get_serialized())
    ofser.close()
    return #exiting here
  # Lets prepare to output now
  args.output = args.output.rstrip('/')
  if not os.path.exists(args.output):
    os.makedirs(args.output)
  # Keep a serialized copy of the emitter alongside the reads.
  ofser = open(args.output+"/RandomBiallelicTranscriptomeEmitter.serialized",'w')
  ofser.write(rbe.get_serialized())
  ofser.close()

  # --- Short-read (Illumina-like) simulation ---
  rbe.set_gaussian_fragmentation_default_hiseq()
  #rbe_ser = rbe.get_serialized()
  sys.stderr.write("Sequencing short reads\n")
  global shand1
  shand1 = gzip.open(args.output+"/SR_1.fq.gz",'wb')
  global shand2
  shand2 = gzip.open(args.output+"/SR_2.fq.gz",'wb')
  z = 0
  buffer_full_size = 5000  # reads are batched to amortize pool overhead
  buffer = []
  if args.threads > 1:
    p = Pool(processes=args.threads)
  # Read numbering is offset by starting_read_multiplier so multiple
  # runs from one emitter can produce disjoint read names.
  for i in range(args.short_read_count*args.starting_read_multiplier,args.short_read_count*(args.starting_read_multiplier+1)):
    z = i+1
    buffer.append(z)
    if buffer_full_size <= len(buffer):
      vals = buffer[:]
      buffer = []
      if args.threads > 1:
        p.apply_async(process_short_read_buffer,args=(rbe,vals,args),callback=write_short_reads)
      else:
        oval = process_short_read_buffer(rbe,vals,args)
        write_short_reads(oval)
  # Flush any partially-filled final buffer.
  if len(buffer) > 0:
    vals = buffer[:]
    buffer = []
    if args.threads > 1:
      p.apply_async(process_short_read_buffer,args=(rbe,vals,args),callback=write_short_reads)
    else:
      oval = process_short_read_buffer(rbe,vals,args)
      write_short_reads(oval)
  if args.threads > 1:
    p.close()
    p.join()
  sys.stderr.write("\nFinished sequencing short reads\n")
  shand1.close()
  shand2.close()
  global emissions_reports
  # Each entry is a Queue holding one report; unwrap them all.
  for i in range(0,len(emissions_reports)): emissions_reports[i]= emissions_reports[i].get()
  sr_report = combine_reports(emissions_reports)
  rbe.emissions_report = {} # initialize so we don't accidentally overwrite
  # Now lets print out some of the emission details
  of = open(args.output+"/SR_report.txt",'w')
  for name in sorted(rbe.name2locus.keys()):
    express = 1
    if rbe.transcriptome1.expression:
      express = rbe.transcriptome1.expression.get_expression(name)
    if name in sr_report:
      of.write(name +"\t"+rbe.gene_names[name]+"\t"+str(rbe.name2locus[name])+"\t"+str(express)+"\t"+str(rbe.transcriptome1_rho[name])+"\t"+str(sr_report[name][0])+"\t"+str(sr_report[name][1])+"\n")
    else:
      of.write(name +"\t"+rbe.gene_names[name]+"\t"+str(rbe.name2locus[name])+"\t"+str(express)+"\t"+str(rbe.transcriptome1_rho[name])+"\t"+str(0)+"\t"+str(0)+"\n")
  of.close()
  rbe.emissions_report = {}
  emissions_reports = []

  # --- Long-read ccs simulation (same batching scheme as above) ---
  # Now lets create the long read set
  rbe.set_gaussian_fragmentation_default_pacbio()
  #rbe_ser = rbe.get_serialized()
  sys.stderr.write("Sequencing long ccs reads\n")
  shand1 = gzip.open(args.output+"/LR_ccs95.fq.gz",'wb')
  buffer_full_size = 500
  buffer = []
  if args.threads > 1:
    p = Pool(processes=args.threads)
  for i in range(args.starting_read_multiplier*args.long_read_ccs_count,(args.starting_read_multiplier+1)*args.long_read_ccs_count):
    z = i+1
    buffer.append(z)
    if buffer_full_size <= len(buffer):
      vals = buffer[:]
      buffer = []
      if args.threads > 1:
        p.apply_async(process_long_ccs_read_buffer,args=(rbe,vals,args),callback=write_long_reads)
      else:
        oval = process_long_ccs_read_buffer(rbe,vals,args)
        write_long_reads(oval)
  if len(buffer) > 0:
    vals = buffer[:]
    buffer = []
    if args.threads > 1:
      p.apply_async(process_long_ccs_read_buffer,args=(rbe,vals,args),callback=write_long_reads)
    else:
      oval = process_long_ccs_read_buffer(rbe,vals,args)
      write_long_reads(oval)
  if args.threads > 1:
    p.close()
    p.join()
  sys.stderr.write("\nFinished sequencing long reads\n")
  shand1.close()
  for i in range(0,len(emissions_reports)): emissions_reports[i]= emissions_reports[i].get()
  lr_ccs_report = combine_reports(emissions_reports)
  rbe.emissions_report = {} # initialize so we don't accidentally overwrite
  # Now lets print out some of the emission details
  of = open(args.output+"/LR_ccs95_report.txt",'w')
  for name in sorted(rbe.name2locus.keys()):
    express = 1
    if rbe.transcriptome1.expression:
      express = rbe.transcriptome1.expression.get_expression(name)
    if name in lr_ccs_report:
      of.write(name +"\t"+rbe.gene_names[name]+"\t"+str(rbe.name2locus[name])+"\t"+str(express)+"\t"+str(rbe.transcriptome1_rho[name])+"\t"+str(lr_ccs_report[name][0])+"\t"+str(lr_ccs_report[name][1])+"\n")
    else:
      of.write(name +"\t"+rbe.gene_names[name]+"\t"+str(rbe.name2locus[name])+"\t"+str(express)+"\t"+str(rbe.transcriptome1_rho[name])+"\t"+str(0)+"\t"+str(0)+"\n")
  of.close()
  rbe.emissions_report = {}
  emissions_reports = []

  # --- Long-read subread simulation ---
  # Now lets create the long subread read set
  rbe.set_gaussian_fragmentation_default_pacbio()
  #rbe_ser = rbe.get_serialized()
  sys.stderr.write("Sequencing long subreads\n")
  shand1 = gzip.open(args.output+"/LR_subreads.fq.gz",'wb')
  buffer_full_size = 500
  buffer = []
  if args.threads > 1:
    p = Pool(processes=args.threads)
  for i in range(args.long_read_subread_count*args.starting_read_multiplier,(args.starting_read_multiplier+1)*args.long_read_subread_count):
    z = i+1
    buffer.append(z)
    if buffer_full_size <= len(buffer):
      vals = buffer[:]
      buffer = []
      if args.threads > 1:
        p.apply_async(process_long_sub_read_buffer,args=(rbe,vals,args),callback=write_long_reads)
      else:
        oval = process_long_sub_read_buffer(rbe,vals,args)
        write_long_reads(oval)
  if len(buffer) > 0:
    vals = buffer[:]
    buffer = []
    if args.threads > 1:
      p.apply_async(process_long_sub_read_buffer,args=(rbe,vals,args),callback=write_long_reads)
    else:
      oval = process_long_sub_read_buffer(rbe,vals,args)
      write_long_reads(oval)
  if args.threads > 1:
    p.close()
    p.join()
  sys.stderr.write("\nFinished sequencing long reads\n")
  shand1.close()
  for i in range(0,len(emissions_reports)): emissions_reports[i]= emissions_reports[i].get()
  lr_sub_report = combine_reports(emissions_reports)
  rbe.emissions_report = {} # initialize so we don't accidentally overwrite
  # Now lets print out some of the emission details
  of = open(args.output+"/LR_subreads_report.txt",'w')
  for name in sorted(rbe.name2locus.keys()):
    express = 1
    if rbe.transcriptome1.expression:
      express = rbe.transcriptome1.expression.get_expression(name)
    if name in lr_sub_report:
      of.write(name +"\t"+rbe.gene_names[name]+"\t"+str(rbe.name2locus[name])+"\t"+str(express)+"\t"+str(rbe.transcriptome1_rho[name])+"\t"+str(lr_sub_report[name][0])+"\t"+str(lr_sub_report[name][1])+"\n")
    else:
      of.write(name +"\t"+rbe.gene_names[name]+"\t"+str(rbe.name2locus[name])+"\t"+str(express)+"\t"+str(rbe.transcriptome1_rho[name])+"\t"+str(0)+"\t"+str(0)+"\n")
  of.close()

  # --- Combined report over all three read sets ---
  combo_report = combine_reports([sr_report,lr_ccs_report,lr_sub_report])
  of = open(args.output+"/LR_SR_combo_report.txt",'w')
  for name in sorted(rbe.name2locus.keys()):
    express = 1
    if rbe.transcriptome1.expression:
      express = rbe.transcriptome1.expression.get_expression(name)
    if name in combo_report:
      of.write(name +"\t"+rbe.gene_names[name]+"\t"+str(rbe.name2locus[name])+"\t"+str(express)+"\t"+str(rbe.transcriptome1_rho[name])+"\t"+str(combo_report[name][0])+"\t"+str(combo_report[name][1])+"\n")
    else:
      of.write(name +"\t"+rbe.gene_names[name]+"\t"+str(rbe.name2locus[name])+"\t"+str(express)+"\t"+str(rbe.transcriptome1_rho[name])+"\t"+str(0)+"\t"+str(0)+"\n")
  of.close()
def combine_reports(reports):
  """Merge per-buffer emission reports into one.

  Each report maps a transcript name to a two-element list of counts;
  counts for the same name are summed element-wise. Returns the merged
  dict."""
  combined = {}
  for report in reports:
    for name in report:
      totals = combined.setdefault(name, [0, 0])
      totals[0] += report[name][0]
      totals[1] += report[name][1]
  return combined
def process_short_read_buffer(rbe,buffer,args):
  """Emit one paired-end short read per entry in buffer.

  Runs in a worker process. Returns [read1_fastq, read2_fastq,
  read_count, emissions_report] where the fastq payloads are
  newline-terminated strings ready to be written out.

  Improvements over the original: fastq text is assembled with
  ''.join() instead of quadratic string +=, and the Illumina error
  profile is only built when errors are actually simulated.
  """
  # Only build the error profile when it will be used (--no_errors skips it).
  fq_prof_illumina = None if args.no_errors else default_illumina()
  read1_parts = []
  read2_parts = []
  for z in buffer:
    [name,l1,r1] = rbe.emit_paired_short_read(args.short_read_length)
    if args.no_errors:
      # Perfect reads: maximum quality for every base.
      seq1, qual1 = l1, len(l1)*'I'
      seq2, qual2 = r1, len(r1)*'I'
    else:
      l1perm = fq_prof_illumina.create_fastq_and_permute_sequence(l1)
      r1perm = fq_prof_illumina.create_fastq_and_permute_sequence(r1)
      seq1, qual1 = l1perm['seq'], l1perm['qual']
      seq2, qual2 = r1perm['seq'], r1perm['qual']
    read1_parts.append("@SRSIM"+str(z)+"\n"+seq1+"\n+\n"+qual1+"\n")
    read2_parts.append("@SRSIM"+str(z)+"\n"+seq2+"\n+\n"+qual2+"\n")
  return [''.join(read1_parts),''.join(read2_parts),len(buffer),rbe.emissions_report]
def process_long_ccs_read_buffer(rbe,buffer,args):
  """Emit one PacBio ccs-style long read per entry in buffer.

  Runs in a worker process. Returns [fastq_text, read_count,
  emissions_report].

  Improvements over the original: fastq text is assembled with
  ''.join() instead of quadratic string +=, and the ccs95 error
  profile is only built when errors are actually simulated.
  """
  fq_prof_pacbio_ccs95 = None if args.no_errors else default_pacbio_ccs95()
  parts = []
  for z in buffer:
    [name,seq] = rbe.emit_long_read()
    # Synthetic PacBio-style movie/read name ending in /ccs.
    g = 'm150101_010101_11111_c111111111111111111_s1_p0/'+str(z)+'/ccs'
    if args.no_errors:
      out_seq, out_qual = seq, len(seq)*'I'
    else:
      seqperm = fq_prof_pacbio_ccs95.create_fastq_and_permute_sequence(seq)
      out_seq, out_qual = seqperm['seq'], seqperm['qual']
    parts.append("@"+g+"\n"+out_seq+"\n+\n"+out_qual+"\n")
  return [''.join(parts),len(buffer),rbe.emissions_report]
def process_long_sub_read_buffer(rbe,buffer,args):
  """Emit one PacBio subread-style long read per entry in buffer.

  Runs in a worker process. Returns [fastq_text, read_count,
  emissions_report].

  Improvements over the original: fastq text is assembled with
  ''.join() instead of quadratic string +=, and the subread error
  profile is only built when errors are actually simulated.
  """
  fq_prof_pacbio_subreads = None if args.no_errors else default_pacbio_subreads()
  parts = []
  for z in buffer:
    [name,seq] = rbe.emit_long_read()
    # Synthetic PacBio-style name with a 0-based coordinate range suffix.
    g = 'm150102_010102_11112_c111111111111111112_s1_p0/'+str(z)+'/0_'+str(len(seq)-1)
    if args.no_errors:
      out_seq, out_qual = seq, len(seq)*'I'
    else:
      seqperm = fq_prof_pacbio_subreads.create_fastq_and_permute_sequence(seq)
      out_seq, out_qual = seqperm['seq'], seqperm['qual']
    parts.append("@"+g+"\n"+out_seq+"\n+\n"+out_qual+"\n")
  return [''.join(parts),len(buffer),rbe.emissions_report]
def write_short_reads(vals):
  # apply_async callback, runs in the parent process: under the shared
  # lock, append the rendered fastq text to both short-read handles,
  # bump the progress counter, and stash the emission report. The report
  # is wrapped in a Queue so main() can later .get() every entry of
  # emissions_reports uniformly, whether it came from this callback or
  # from an AsyncResult.
  [read1,read2,zsize,emissions_report] = vals
  global write_lock
  global gcounter
  write_lock.acquire()
  global shand1
  global shand2
  global emissions_reports
  gcounter += zsize
  sys.stderr.write(str(gcounter)+"\r")  # in-place progress on stderr
  shand1.write(read1)
  shand2.write(read2)
  eq = Queue()
  eq.put(emissions_report)
  emissions_reports.append(eq)
  write_lock.release()
  return
def write_long_reads(vals):
  # apply_async callback for the long-read sets: same scheme as
  # write_short_reads but with a single output handle (shand1).
  [read1,zsize,emissions_report] = vals
  global write_lock
  global gcounter
  write_lock.acquire()
  global shand1
  global emissions_reports
  gcounter += zsize
  sys.stderr.write(str(gcounter)+"\r")  # in-place progress on stderr
  shand1.write(read1)
  eq = Queue()
  eq.put(emissions_report)
  emissions_reports.append(eq)
  write_lock.release()
  return
# Pre:
# Take the allele info for one chromosome,
# Take one chromosome sequence string
# Take the left or right 0 or 1 position or the phased allele
# Take the chromosome name
# Post:
# Reutrn a number of changes made, the chromosome name, and the chromosome sequence
def adjust_reference_genome(ainfo,refchrom,lrpos,chrom_name):
reflist = list(refchrom)
counter = 0
for pos in sorted(ainfo):
if reflist[pos-1].upper() != ainfo[pos][lrpos].upper():
counter += 1
reflist[pos-1] = ainfo[pos][lrpos]
return [counter,chrom_name,''.join(reflist)]
def get_loci(transcripts_genepred):
  """Group transcripts from a genepred file into overlapping loci.

  Returns [locus2name, name2locus]: locus2name maps a 1-based locus
  number to the set of transcript names it contains, and name2locus is
  the reverse mapping from transcript name to locus number."""
  loci = Loci()
  loci.verbose = True
  with open(transcripts_genepred) as inf:
    for line in inf:
      if line[0]=='#': continue
      entry = GenePredEntry(line.rstrip())
      rng = Bed(entry.value('chrom'),entry.value('txStart'),entry.value('txEnd'))
      rng.set_payload(entry.value('name'))
      single = Locus()
      single.add_member(rng)
      loci.add_locus(single)
  sys.stderr.write("Organizing genepred data into overlapping loci\n")
  sys.stderr.write("Started with "+str(len(loci.loci))+" loci\n")
  # Merge the per-transcript loci wherever they overlap.
  loci.update_loci()
  sys.stderr.write("Ended with "+str(len(loci.loci))+" loci\n")
  locus2name = {}
  name2locus = {}
  for locus_number, locus in enumerate(loci.loci, 1):
    for member in locus.members:
      name = member.get_payload()
      locus2name.setdefault(locus_number, set()).add(name)
      name2locus[name] = locus_number
  return [locus2name,name2locus]
def load_from_inputs(args):
  """Build a RandomBiallelicTranscriptomeEmitter from raw inputs.

  args.inputs is [reference_genome_fasta, phased_VCF, transcripts_genepred].
  Applies the phased SNPs to the reference to get two haplotype genomes,
  builds a transcriptome for each, assigns loci and per-transcript ASE
  rho values, and returns the configured emitter."""
  #Read in the VCF file
  sys.stderr.write("Reading in the VCF file\n")
  alleles = {}  # chrom -> {pos -> phased genotype pair}
  #with open(args.phased_VCF) as inf:
  with open(args.inputs[1]) as inf:
    for line in inf:
      vcf = VCF(line)
      if not vcf.is_snp(): continue
      g = vcf.get_phased_genotype()
      if not g: continue
      if vcf.value('chrom') not in alleles:
        alleles[vcf.value('chrom')] = {}
      if vcf.value('pos') in alleles[vcf.value('chrom')]:
        sys.stderr.write("WARNING: seeing the same position twice.\n"+line.rstrip()+"\n")
      alleles[vcf.value('chrom')][vcf.value('pos')] = g # set our left and right
  sys.stderr.write("Reading in the reference genome\n")
  #ref = read_fasta_into_hash(args.reference_genome)
  ref = read_fasta_into_hash(args.inputs[0])
  res1 = []  # per-chromosome results for haplotype 0 (left)
  res2 = []  # per-chromosome results for haplotype 1 (right)
  p = None
  sys.stderr.write("Introducing VCF changes to reference sequences\n")
  # Pretty memory intesnive to so don't go with all possible threads
  if args.threads > 1: p = Pool(processes=max(1,int(args.threads/4)))
  for chrom in ref:
    # handle the case where there is no allele information
    if chrom not in alleles:
      # Wrap in a Queue so every entry supports .get() like an AsyncResult.
      r1q = Queue()
      r1q.put([0,chrom,ref[chrom]])
      res1.append(r1q)
      r2q = Queue()
      r2q.put([0,chrom,ref[chrom]])
      res2.append(r2q)
    elif args.threads > 1:
      res1.append(p.apply_async(adjust_reference_genome,args=(alleles[chrom],ref[chrom],0,chrom)))
      res2.append(p.apply_async(adjust_reference_genome,args=(alleles[chrom],ref[chrom],1,chrom)))
    else:
      r1q = Queue()
      r1q.put(adjust_reference_genome(alleles[chrom],ref[chrom],0,chrom))
      res1.append(r1q)
      r2q = Queue()
      r2q.put(adjust_reference_genome(alleles[chrom],ref[chrom],1,chrom))
      res2.append(r2q)
  if args.threads > 1:
    p.close()
    p.join()
  # now we can fill reference 1 with all our new sequences
  ref1 = {}
  c1 = 0
  for i in range(0,len(res1)):
    res = res1[i].get()
    c1 += res[0]
    ref1[res[1]]=res[2]
  # now we can fill reference 2 with all our new sequences
  ref2 = {}
  c2 = 0
  for i in range(0,len(res2)):
    res = res2[i].get()
    c2 += res[0]
    ref2[res[1]]=res[2]
  sys.stderr.write("Made "+str(c1)+"|"+str(c2)+" changes to the reference\n")
  # Now ref1 and ref2 have are the diploid sources of the transcriptome
  gpdnames = {}  # transcript name -> gene name
  txn1 = Transcriptome()
  txn2 = Transcriptome()
  txn1.set_reference_genome_dictionary(ref1)
  txn2.set_reference_genome_dictionary(ref2)
  #with open(args.transcripts_genepred) as inf:
  with open(args.inputs[2]) as inf:
    for line in inf:
      if line[0]=='#': continue
      txn1.add_genepred_line(line.rstrip())
      txn2.add_genepred_line(line.rstrip())
      gpd = GenePredEntry(line.rstrip())
      gpdnames[gpd.value('name')] = gpd.value('gene_name')
  # The transcriptomes are set but we dont' really need the references anymore
  # Empty our big memory things
  txn1.ref_hash = None
  txn2.ref_hash = None
  # NOTE(review): deleting while iterating .keys() is only safe on
  # Python 2, where keys() returns a list snapshot.
  for chrom in ref1.keys(): del ref1[chrom]
  for chrom in ref2.keys(): del ref2[chrom]
  for chrom in ref.keys(): del ref[chrom]
  if not args.locus_by_gene_name:
    #[locus2name,name2locus] = get_loci(args.transcripts_genepred)
    [locus2name,name2locus] = get_loci(args.inputs[2])
  else: # set locus by gene name
    sys.stderr.write("Organizing loci by gene name\n")
    locus2name = {}
    name2locus = {}
    numname = {}  # gene name -> locus number
    m = 0
    for name in sorted(gpdnames):
      gene = gpdnames[name]
      if gene not in numname:
        m+=1
        numname[gene] = m
      num = numname[gene]
      if num not in locus2name:
        locus2name[num] = set()
      locus2name[num].add(name)
      name2locus[name] = num
    sys.stderr.write("Ended with "+str(len(locus2name.keys()))+" loci\n")
  if args.isoform_expression:
    sys.stderr.write("Reading expression from a TSV\n")
    with open(args.isoform_expression) as inf:
      line1 = inf.readline()  # skip the header line
      for line in inf:
        f = line.rstrip().split("\t")
        txn1.add_expression(f[0],float(f[1]))
        txn2.add_expression(f[0],float(f[1]))
  elif args.cufflinks_isoform_expression:
    sys.stderr.write("Using cufflinks expression\n")
    cuffz = 0
    with open(args.cufflinks_isoform_expression) as inf:
      line1 = inf.readline()  # skip the header line
      for line in inf:
        cuffz +=1
        sys.stderr.write(str(cuffz)+" cufflinks entries processed\r")
        f = line.rstrip().split("\t")
        # Column 9 is the FPKM field of a cufflinks isoform tracking file.
        txn1.add_expression_no_update(f[0],float(f[9]))
        txn2.add_expression_no_update(f[0],float(f[9]))
    txn1.update_expression()
    txn2.update_expression()
    sys.stderr.write("\n")
  elif args.uniform_expression:
    sys.stderr.write("Using uniform expression model\n")
  else:
    sys.stderr.write("Warning isoform expression not sepcified, using uniform expression model.\n")
  # Now we have the transcriptomes set
  rhos = {} # The ASE of allele 1 (the left side)
  randos = {}  # per-locus random draws, used by the locus-random ASE mode
  if args.seed:
    random.seed(args.seed)
  for z in locus2name: randos[z] = random.random()
  sys.stderr.write("Setting rho for each transcript\n")
  # Lets set rho for ASE for each transcript
  for tname in sorted(txn1.transcripts):
    # The `== 0` check keeps an explicit --ASE_identical 0 from falling
    # through to the random modes.
    if args.ASE_identical or args.ASE_identical == 0:
      rhos[tname] = float(args.ASE_identical)
    elif args.ASE_isoform_random:
      rhos[tname] = random.random()
    else: # we must be on locus random
      rhos[tname] = randos[name2locus[tname]]
  #Now our dataset is set up
  rbe = SimulationBasics.RandomBiallelicTranscriptomeEmitter(txn1,txn2)
  rbe.gene_names = gpdnames
  rbe.name2locus = name2locus
  rbe.set_transcriptome1_rho(rhos)
  return rbe
if __name__=="__main__":
main()
|
from pyplan.pyplan.preference_module.models import *
from pyplan.pyplan.preference.models import *
from pyplan.pyplan.users.models import *
from pyplan.pyplan.companies.models import *
from pyplan.pyplan.usercompanies.models import *
from pyplan.pyplan.user_company_preference.models import *
from pyplan.pyplan.company_preference.models import *
from pyplan.pyplan.dashboard.models import *
from pyplan.pyplan.dashboard_comment.models import *
from pyplan.pyplan.department.models import *
from pyplan.pyplan.report.models import *
from pyplan.pyplan.diagram_shortcut.models import *
from pyplan.pyplan.activity.models import *
from pyplan.pyplan.external_link.models import *
from pyplan.pyplan.external_link.report.models import *
from pyplan.pyplan.external_link.dashboard.models import *
from pyplan.pyplan.external_link.node.models import *
from pyplan.pyplan.common.email.models import *
|
#!/usr/bin/env python
# vim: set fileencoding=UTF-8 :
"""
weather.py - jenni Weather Module
Copyright 2009-2013, Michael Yanovich (yanovich.net)
Copyright 2008-2013, Sean B. Palmer (inamidst.com)
Licensed under the Eiffel Forum License 2.
More info:
* jenni: https://github.com/myano/jenni/
* Phenny: http://inamidst.com/phenny/
"""
import json
import re
import urllib
import web
from tools import deprecated
from modules import latex
from modules import unicode as uc
from icao import data
# Pre-compiled patterns for scraping the pages fetched below.
r_from = re.compile(r'(?i)([+-]\d+):00 from')  # UTC offset, e.g. "+5:00 from" (flightstats)
r_tag = re.compile(r'<(?!!)[^>]+>')  # any HTML tag except <!...> declarations
re_line = re.compile('<small>1</small>(.*)')  # first result row of a geonames search page
re_lat = re.compile('<span class="latitude">(\S+)</span>')
re_long = re.compile('<span class="longitude">(\S+)</span>')
cnty = re.compile('<a href="/countries/\S+\.html">(.+)</a>')  # country name link
city = re.compile('<a href="/maps/\S+">(.+)</a>')  # city name link
def clean(txt):
    '''Remove HTML tags from a given text (anything matching r_tag;
    <!...> declarations and character entities are left alone).'''
    return r_tag.sub('', txt)
def location(name):
    """Geocode *name* via a geonames.org HTML search.

    Returns a (city, country, lat, lng) tuple of strings, or
    ('?', '?', '0', '0') when the result page cannot be parsed.
    """
    quoted = urllib.quote(name.encode('utf-8'))
    uri = "http://www.geonames.org/search.html?q=%s" % (quoted)
    if re.match('\d{5}', quoted):
        # Five leading digits: treat it as a US ZIP code.
        uri += '&country=us'
    page = web.get(uri)
    unknown = ('?', '?', '0', '0')
    rows = re_line.findall(page)
    if not rows:
        return unknown
    row = rows[0]
    found_lat = re_lat.findall(row)
    found_lng = re_long.findall(row)
    found_country = cnty.findall(row)
    found_city = city.findall(row)
    if not (found_lng and found_lat and found_country and found_city):
        return unknown
    return (clean(found_city[0]), clean(found_country[0]),
            clean(found_lat[0]), clean(found_lng[0]))
class GrumbleError(Exception):
    """Raised when a remote weather/geocoding service cannot be fetched.

    Bug fix: this previously subclassed ``object``, so every
    ``raise GrumbleError(...)`` in local() and get_metar() would itself
    fail with ``TypeError: exceptions must derive from BaseException``
    instead of raising the intended error.
    """
    pass
def local(icao, hour, minute):
    """Convert a Zulu time to the airport's local time string.

    Scrapes flightstats.com for the airport's UTC offset. When an offset
    is found, returns 'LH:MM, HHMMZ'; otherwise returns 'HH:MMZ'
    unchanged. Raises GrumbleError when the page cannot be fetched.
    """
    uri = ('http://www.flightstats.com/' +
           'go/Airport/airportDetails.do?airportCode=%s')
    try:
        page = web.get(uri % icao)
    except AttributeError:
        raise GrumbleError('A WEBSITE HAS GONE DOWN WTF STUPID WEB')
    match = r_from.search(page)
    if not match:
        return str(hour) + ':' + str(minute) + 'Z'
    offset = match.group(1)
    lhour = (int(hour) + int(offset)) % 24
    return (str(lhour) + ':' + str(minute) + ', ' + str(hour) +
            str(minute) + 'Z')
def code(jenni, search):
    """Resolve *search* to an ICAO airport code.

    A string already in the ICAO table is returned uppercased. Otherwise
    the place is geocoded and the nearest known station (by squared
    lat/long distance) is returned. Returns False when geocoding fails.
    """
    candidate = search.upper()
    if candidate in data:
        return candidate
    name, country, latitude, longitude = location(search)
    if name == '?':
        return False
    # Track (squared distance, icao) for the closest station seen so far;
    # the huge sentinel guarantees the first real station replaces it.
    best = (99999999999999999999999999999, 'ICAO')
    for icao_code in data:
        lat = float(data[icao_code][0])
        lon = float(data[icao_code][1])
        lat_diff = abs(float(latitude) - lat)
        lon_diff = abs(float(longitude) - lon)
        distance = (lat_diff * lat_diff) + (lon_diff * lon_diff)
        if distance < best[0]:
            best = (distance, icao_code)
    return best[1]
def get_metar(icao_code):
    """Obtain the raw METAR report from NOAA for a given ICAO code.

    Returns (True, report_text) on success, or (False, error_message)
    when NOAA has no data for the station.
    """
    uri = 'http://weather.noaa.gov/pub/data/observations/metar/stations/%s.TXT'
    try:
        page = web.get(uri % icao_code)
    except AttributeError:
        raise GrumbleError('OH CRAP NOAA HAS GONE DOWN THE WEB IS BROKEN')
    missing = 'Not Found' in page
    if missing:
        return False, icao_code + ': no such ICAO code, or no NOAA data.'
    return True, page
def get_icao(jenni, inc, command='weather'):
    """Resolve user input *inc* to an ICAO code.

    Returns (True, icao_code) on success, or (False, help/error text)
    when the input is empty or cannot be resolved.
    """
    if not inc:
        return False, 'Try .%s London, for example?' % (command)
    icao_code = code(jenni, inc)
    if icao_code:
        return True, icao_code
    return False, 'No ICAO code found, sorry.'
def show_metar(jenni, input):
    '''.metar <location> -- shows the raw METAR data for a given location'''
    query = input.group(2)
    if not query:
        return jenni.say('Try .metar London, for example?')
    ok, icao_code = get_icao(jenni, query, 'metar')
    if not ok:
        return jenni.say(icao_code)
    # On failure `metar` already carries the error text, on success the raw
    # report -- the original said the same payload in both branches, so one
    # call suffices.
    status, metar = get_metar(icao_code)
    return jenni.say(metar)
show_metar.commands = ['metar']
show_metar.example = '.metar London'
show_metar.priority = 'low'
def f_weather(jenni, input):
    """.weather <ICAO> - Show the weather at airport with the code <ICAO>."""
    text = input.group(2)
    status, icao_code = get_icao(jenni, text)
    if not status:
        return jenni.say(icao_code)
    status, page = get_metar(icao_code)
    if not status:
        return jenni.say(page)
    # The last line of the NOAA file is the METAR itself; split it into
    # whitespace-separated groups that are consumed left-to-right below.
    metar = page.splitlines().pop()
    metar = metar.split(' ')
    # Drop the leading station identifier (4 letters) if present.
    if len(metar[0]) == 4:
        metar = metar[1:]
    # Observation time group, e.g. 251730Z.
    if metar[0].endswith('Z'):
        time = metar[0]
        metar = metar[1:]
    else: time = None
    if metar[0] == 'AUTO':
        metar = metar[1:]
    if metar[0] == 'VCU':
        jenni.say(icao_code + ': no data provided')
        return
    # Wind group, e.g. 24012KT.
    if metar[0].endswith('KT'):
        wind = metar[0]
        metar = metar[1:]
    else: wind = None
    # Variable wind direction group, e.g. 180V240 (parsed but unused).
    if ('V' in metar[0]) and (metar[0] != 'CAVOK'):
        vari = metar[0]
        metar = metar[1:]
    else: vari = None
    # Visibility: 4-digit metres or a statute-miles group ending in SM.
    if ((len(metar[0]) == 4) or
            metar[0].endswith('SM')):
        visibility = metar[0]
        metar = metar[1:]
    else: visibility = None
    # Skip runway visual range groups (R...L / R...L/...).
    while metar[0].startswith('R') and (metar[0].endswith('L')
                                        or 'L/' in metar[0]):
        metar = metar[1:]
    # Skip directional visibility like 7000SE.
    if len(metar[0]) == 6 and (metar[0].endswith('N') or
                               metar[0].endswith('E') or
                               metar[0].endswith('S') or
                               metar[0].endswith('W')):
        metar = metar[1:] # 7000SE?
    # Collect present-weather groups until a cloud-cover group starts.
    cond = []
    while (((len(metar[0]) < 5) or
            metar[0].startswith('+') or
            metar[0].startswith('-')) and (not (metar[0].startswith('VV') or
            metar[0].startswith('SKC') or metar[0].startswith('CLR') or
            metar[0].startswith('FEW') or metar[0].startswith('SCT') or
            metar[0].startswith('BKN') or metar[0].startswith('OVC')))):
        cond.append(metar[0])
        metar = metar[1:]
    while '/P' in metar[0]:
        metar = metar[1:]
    if not metar:
        return jenni.say(icao_code + ': no data provided')
    # Collect cloud-cover groups (vertical visibility / clear / few / ...).
    cover = []
    while (metar[0].startswith('VV') or metar[0].startswith('SKC') or
           metar[0].startswith('CLR') or metar[0].startswith('FEW') or
           metar[0].startswith('SCT') or metar[0].startswith('BKN') or
           metar[0].startswith('OVC')):
        cover.append(metar[0])
        metar = metar[1:]
    if not metar:
        return jenni.say(icao_code + ': no data provided')
    if metar[0] == 'CAVOK':
        cover.append('CLR')
        metar = metar[1:]
    if metar[0] == 'PRFG':
        cover.append('CLR') # @@?
        metar = metar[1:]
    if metar[0] == 'NSC':
        cover.append('CLR')
        metar = metar[1:]
    # Temperature/dew point group, e.g. 12/08 or M01/M04.
    if ('/' in metar[0]) or (len(metar[0]) == 5 and metar[0][2] == '.'):
        temp = metar[0]
        metar = metar[1:]
    else: temp = None
    if metar[0].startswith('QFE'):
        metar = metar[1:]
    # Pressure: Q#### (hectopascals) or A#### (inches of mercury, US).
    if metar[0].startswith('Q') or metar[0].startswith('A'):
        pressure = metar[0]
        metar = metar[1:]
    else: pressure = None
    if time:
        hour = time[2:4]
        minute = time[4:6]
        time = local(icao_code, hour, minute)
    else: time = '(time unknown)'
    speed = False
    if wind:
        try:
            speed = float(wind[3:5])
        except:
            speed = 0
        # Beaufort-style description of the wind speed (knots).
        if speed < 1:
            description = 'Calm'
        elif speed < 4:
            description = 'Light air'
        elif speed < 7:
            description = 'Light breeze'
        elif speed < 11:
            description = 'Gentle breeze'
        elif speed < 16:
            description = 'Moderate breeze'
        elif speed < 22:
            description = 'Fresh breeze'
        elif speed < 28:
            description = 'Strong breeze'
        elif speed < 34:
            description = 'Near gale'
        elif speed < 41:
            description = 'Gale'
        elif speed < 48:
            description = 'Strong gale'
        elif speed < 56:
            description = 'Storm'
        elif speed < 64:
            description = 'Violent storm'
        else: description = 'Hurricane'
        # Compass arrow for the wind direction; VRB = variable.
        degrees = wind[0:3]
        if degrees == 'VRB':
            degrees = u'\u21BB'.encode('utf-8')
        # NOTE(review): below, `degrees` is still a *string* compared to
        # floats; under Python 2 a str/float comparison orders by type, so
        # these branches do not select the intended arrow -- presumably
        # `degrees = float(degrees)` is missing. Confirm before relying on
        # the direction arrows.
        elif (degrees <= 22.5) or (degrees > 337.5):
            degrees = u'\u2191'.encode('utf-8')
        elif (degrees > 22.5) and (degrees <= 67.5):
            degrees = u'\u2197'.encode('utf-8')
        elif (degrees > 67.5) and (degrees <= 112.5):
            degrees = u'\u2192'.encode('utf-8')
        elif (degrees > 112.5) and (degrees <= 157.5):
            degrees = u'\u2198'.encode('utf-8')
        elif (degrees > 157.5) and (degrees <= 202.5):
            degrees = u'\u2193'.encode('utf-8')
        elif (degrees > 202.5) and (degrees <= 247.5):
            degrees = u'\u2199'.encode('utf-8')
        elif (degrees > 247.5) and (degrees <= 292.5):
            degrees = u'\u2190'.encode('utf-8')
        elif (degrees > 292.5) and (degrees <= 337.5):
            degrees = u'\u2196'.encode('utf-8')
        if not icao_code.startswith('EN') and not icao_code.startswith('ED'):
            ## for any part of the world except Germany and Norway
            wind = '%s %.1fkt (%s)' % (description, speed, degrees)
        elif icao_code.startswith('ED'):
            ## Germany
            kmh = float(speed * 1.852)
            wind = '%s %.1fkm/h (%.1fkt) (%s)' % (description, kmh, speed, degrees)
        elif icao_code.startswith('EN'):
            ## Norway
            ms = float(speed * 0.514444444)
            wind = '%s %.1fm/s (%.1fkt) (%s)' % (description, ms, speed, degrees)
    else: wind = '(wind unknown)'
    if visibility:
        visibility = visibility + 'm'
    else: visibility = '(visibility unknown)'
    if cover:
        # Reduce the cover groups to the most overcast level seen.
        level = None
        for c in cover:
            if c.startswith('OVC') or c.startswith('VV'):
                if (level is None) or (level < 8):
                    level = 8
            elif c.startswith('BKN'):
                if (level is None) or (level < 5):
                    level = 5
            elif c.startswith('SCT'):
                if (level is None) or (level < 3):
                    level = 3
            elif c.startswith('FEW'):
                if (level is None) or (level < 1):
                    level = 1
            elif c.startswith('SKC') or c.startswith('CLR'):
                if level is None:
                    level = 0
        if level == 8:
            cover = u'Overcast \u2601'.encode('utf-8')
        elif level == 5:
            cover = 'Cloudy'
        elif level == 3:
            cover = 'Scattered'
        elif (level == 1) or (level == 0):
            cover = u'Clear \u263C'.encode('utf-8')
        else: cover = 'Cover Unknown'
    else: cover = 'Cover Unknown'
    if temp:
        if '/' in temp:
            t = temp.split('/')
            temp = t[0]
            dew = t[1]
        else: temp = temp.split('.')[0]
        # M prefix marks negative Celsius values in METAR.
        if temp.startswith('M'):
            temp = '-' + temp[1:]
        # NOTE(review): when the group had no '/', `dew` was never bound and
        # the next line raises NameError -- confirm whether that path can
        # occur in practice.
        if dew.startswith('M'):
            dew = '-' + dew[1:]
        try:
            temp = float(temp)
            dew = float(dew)
        except ValueError:
            temp = '?'
            dew = '?'
    else:
        temp = '?'
        dew = '?'
    # Wind chill (the "13.12 + ..." form is the Celsius/km/h formula).
    windchill = False
    if isinstance(temp, float) and isinstance(speed, float) and temp <= 10.0 and speed > 0:
        speed_kmh = speed * 1.852
        windchill = 13.12 + (0.6215 * temp) - (11.37 * (speed_kmh ** (0.16))) + (0.3965 * temp * (speed_kmh ** (0.16)))
        windchill = float(windchill)
        f = (windchill * 1.8) + 32
        if icao_code.startswith('K'):
            ## if in North America
            windchill = u'%.1f\u00B0F (%.1f\u00B0C)'.encode('utf-8') % (f, windchill)
        else:
            windchill = u'%.1f\u00B0C'.encode('utf-8') % (windchill)
    if pressure:
        if pressure.startswith('Q'):
            # Q-group: pressure in hectopascals/millibars.
            pressure = pressure.lstrip('Q')
            if pressure != 'NIL':
                pressure = str(float(pressure)) + 'mb'
            else: pressure = '?mb'
        elif pressure.startswith('A'):
            # A-group: US altimeter setting in hundredths of inHg.
            pressure = pressure.lstrip('A')
            if pressure != 'NIL':
                inches = pressure[:2] + '.' + pressure[2:]
                mb = float(inches) * 33.7685
                pressure = '%sin (%.2fmb)' % (inches, mb)
            else: pressure = '?mb'
            # Stations reporting in inches are US ones, so also show
            # Fahrenheit.  NOTE(review): the original indentation was lost
            # in transit; this placement (inside the A-branch) is inferred
            # from the surrounding else-clauses -- confirm against upstream
            # jenni's weather module.
            if isinstance(temp, float):
                f = (temp * 1.8) + 32
                temp = u'%.1f\u00B0F (%.1f\u00B0C)'.encode('utf-8') % (f, temp)
            if isinstance(dew, float):
                f = (dew * 1.8) + 32
                dew = u'%.1f\u00B0F (%.1f\u00B0C)'.encode('utf-8') % (f, dew)
    else: pressure = '?mb'
    # Any temperatures still numeric at this point are shown in Celsius only.
    if isinstance(temp, float):
        temp = u'%.1f\u00B0C'.encode('utf-8') % temp
    if isinstance(dew, float):
        dew = u'%.1f\u00B0C'.encode('utf-8') % dew
    if cond:
        # Translate present-weather codes (e.g. -SHRA) into readable text.
        conds = cond
        cond = ''
        intensities = {
            '-': 'Light',
            '+': 'Heavy'
        }
        descriptors = {
            'MI': 'Shallow',
            'PR': 'Partial',
            'BC': 'Patches',
            'DR': 'Drifting',
            'BL': 'Blowing',
            'SH': 'Showers of',
            'TS': 'Thundery',
            'FZ': 'Freezing',
            'VC': 'In the vicinity:',
            'RA': 'Unimaginable',
        }
        phenomena = {
            'DZ': 'Drizzle',
            'RA': 'Rain',
            'SN': 'Snow',
            'SG': 'Snow Grains',
            'IC': 'Ice Crystals',
            'PL': 'Ice Pellets',
            'GR': 'Hail',
            'GS': 'Small Hail',
            'UP': 'Unknown Precipitation',
            'BR': 'Mist',
            'FG': 'Fog',
            'FU': 'Smoke',
            'VA': 'Volcanic Ash',
            'DU': 'Dust',
            'SA': 'Sand',
            'HZ': 'Haze',
            'PY': 'Spray',
            'PO': 'Whirls',
            'SQ': 'Squalls',
            'FC': 'Tornado',
            'SS': 'Sandstorm',
            'DS': 'Duststorm',
            # ? Cf. http://swhack.com/logs/2007-10-05#T07-58-56
            'TS': 'Thunderstorm',
            'SH': 'Showers'
        }
        for c in conds:
            # Group layout: [intensity][descriptor][phenomenon], decoded by
            # total length (5 = all three, 4 = descriptor+phenomenon, ...).
            if c.endswith('//'):
                if cond: cond += ', '
                cond += 'Some Precipitation'
            elif len(c) == 5:
                intensity = intensities[c[0]]
                descriptor = descriptors[c[1:3]]
                phenomenon = phenomena.get(c[3:], c[3:])
                if cond: cond += ', '
                cond += intensity + ' ' + descriptor + ' ' + phenomenon
            elif len(c) == 4:
                descriptor = descriptors.get(c[:2], c[:2])
                phenomenon = phenomena.get(c[2:], c[2:])
                if cond: cond += ', '
                cond += descriptor + ' ' + phenomenon
            elif len(c) == 3:
                intensity = intensities.get(c[0], c[0])
                phenomenon = phenomena.get(c[1:], c[1:])
                if cond: cond += ', '
                cond += intensity + ' ' + phenomenon
            elif len(c) == 2:
                phenomenon = phenomena.get(c, c)
                if cond: cond += ', '
                cond += phenomenon
    # Assemble the final one-line report.
    output = str()
    output += 'Cover: ' + cover
    output += ', Temp: ' + str(temp)
    output += ', Dew Point: ' + str(dew)
    if windchill:
        output += ', Windchill: ' + str(windchill)
    output += ', Pressure: ' + pressure
    if cond:
        output += ' Condition: ' + cond
    output += ', Wind: ' + wind
    output += ' - %s, %s' % (str(icao_code), time)
    jenni.say(output)
f_weather.rule = (['weather', 'wx'], r'(.*)')
def fucking_weather(jenni, input):
    """.fw (ZIP|City, State) -- provide a ZIP code or a city state pair to hear about the fucking weather"""
    text = input.group(2)
    if not text:
        jenni.reply('INVALID FUCKING INPUT. PLEASE ENTER A FUCKING ZIP CODE, OR A FUCKING CITY-STATE PAIR.')
        return
    new_text = str()
    # uc.encode is a project helper; presumably normalizes the input into a
    # byte string suitable for URL quoting -- TODO confirm against uc module.
    new_text = uc.encode(text)
    search = urllib.quote((new_text).strip())
    url = 'http://thefuckingweather.com/?where=%s' % (search)
    try:
        page = web.get(url)
    except:
        return jenni.say("I COULDN'T ACCESS THE FUCKING SITE.")
    # Scrape the interesting fragments out of the returned HTML.
    re_mark = re.compile('<p class="remark">(.*?)</p>')
    re_temp = re.compile('<span class="temperature" tempf="\S+">(\S+)</span>')
    re_condition = re.compile('<p class="large specialCondition">(.*?)</p>')
    re_flavor = re.compile('<p class="flavor">(.*?)</p>')
    re_location = re.compile('<span id="locationDisplaySpan" class="small">(.*?)</span>')
    temps = re_temp.findall(page)
    remarks = re_mark.findall(page)
    conditions = re_condition.findall(page)
    flavor = re_flavor.findall(page)
    location = re_location.findall(page)
    response = str()
    if location and location[0]:
        response += location[0] + ': '
    if temps:
        tempf = float(temps[0])
        tempc = (tempf - 32.0) * (5 / 9.0)
        response += u'%.1f°F?! %.1f°C?! ' % (tempf, tempc)
    if remarks:
        response += remarks[0]
    else:
        response += "I CAN'T FIND THAT SHIT ON THEIR PAGE."
    if conditions:
        response += ' ' + conditions[0]
    if flavor:
        # NOTE(review): replace(' ', ' ') looks like a no-op as transmitted;
        # the original may have collapsed '&nbsp;' or doubled spaces --
        # confirm against upstream.
        response += ' -- ' + flavor[0].replace(' ', ' ')
    jenni.say(response)
fucking_weather.commands = ['fucking_weather', 'fw']
fucking_weather.priority = 'low'
fucking_weather.rate = 5
def windchill(jenni, input):
    '''.windchill <temp> <wind speed> -- shows Windchill in F'''
    text = input.split()
    # Need the command word plus two numeric arguments.  The old guard only
    # caught len == 1, so ".wc 100" (len 2) fell through with temp/wind
    # never bound and crashed with a NameError at the formula below.
    if len(text) < 3:
        return jenni.say(u'.windchill <temp> <wind speed> -- shows Windchill in \u00B0F')
    try:
        temp = float(text[1])
        wind = float(text[2])
    except ValueError:
        # non-numeric arguments (the bare except also hid real bugs before)
        return jenni.say('Invalid arguments! Try, .windchill without any parameters.')
    if temp > 50:
        return jenni.say(u'The windchill formula only works on temperatures below 50 \u00B0F')
    if wind < 0:
        return jenni.say("You can't have negative wind speed!")
    elif wind >= 300:
        jenni.reply('Are you okay?')
    ## cf. http://is.gd/mgLuzU
    wc = 35.74 + (0.6215 * temp) - (35.75 * (wind ** (0.16))) + (0.4275 * temp * (wind ** (0.16)))
    jenni.say(u'Windchill: %2.f \u00B0F' % (wc))
windchill.commands = ['windchill', 'wc']
windchill.priority = 'low'
# Allow running the module directly for a quick smoke check (Python 2 print
# statement).  NOTE(review): no module docstring is visible in this chunk;
# if __doc__ is None this raises AttributeError -- confirm the file header.
if __name__ == '__main__':
    print __doc__.strip()
|
from datetime import datetime
def test_arg_option_doc():
    """Check that the options table in README.rst matches the docopt-style
    'Options:' block inside we_get/core/we_get.py column for column.
    """
    with open('README.rst') as f:
        content = f.read()
    # Slice out only the general options table: everything between the
    # 'Options' heading and the 'Video options' section.
    option_parts = content.split('Options\n-------')[1].split('Video options\n')[0].strip()
    option_parts = option_parts.splitlines()[1:-1]
    # Each row splits into at most three columns; only the description
    # column (index 2) needs its surrounding whitespace stripped.
    option_parts = [x.split(' ', 2) for x in option_parts]
    for idx, x in enumerate(option_parts):
        option_parts[idx][2] = x[2].strip()
    with open('we_get/core/we_get.py') as f:
        m_content = f.read()
    # Same extraction for the module's usage string.
    m_option_parts = m_content.split('Options:')[1].split('Video options')[0].strip().splitlines()
    m_option_parts = [x.strip().split(' ', 2) for x in m_option_parts]
    for idx, x in enumerate(m_option_parts):
        m_option_parts[idx][2] = x[2].strip()
    assert option_parts == m_option_parts
def test_year():
    """Ensure the copyright range in we_get/core/we_get.py ends at the
    current year.
    """
    current_year = datetime.now().year
    with open('we_get/core/we_get.py') as f:
        m_content = f.read()
    # (A stray no-op expression `m_content.splitlines()[1]` used to sit
    # here; it computed a value and discarded it, so it was removed.)
    # The header reads 'Copyright (c) 2016-<year> ...'; grab the end year.
    year = m_content.split('Copyright (c) 2016-')[1].split(' ')[0]
    assert year == str(current_year)
|
# This script executes the PySedSim runs to produce the results associated with
# Formulation I in Wild et al. (in review)
#import pysedsim
from pysedsim import PySedSim
# instantiate model; all run settings come from the referenced CSV file
pys = PySedSim(file_name = 'formulation_1.csv')
# run a combined serial execution of multiple deterministic and stochastic simulations
pys.execute_simulation()
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.widgets import Slider, Button, RadioButtons
"""
CONIC SECTION COORDINATE VISUALISER
A small script to visualise conic sections in an x & r coordinate system
in terms of a Radius, Bluntness, and two orthogonal oordinate directions as
defined in The Supersonic Blunt Body Problem - Milton D. Van Dyke
https://doi.org/10.2514/8.7744 / Try SciHub ;)
You can play with R, B, eta, and eps to see the x,y path along with a radius circle with the option to trace their path
Andrew McLean
Last Updated: 05/12/17
https://github.com/TropicalIsland/shock_coordinate_system_visualiser
Available under the MIT license
"""
# Assign initial parameters (R = radius, B = bluntness, eta/eps = the two
# orthogonal coordinate directions from Van Dyke's formulation)
R0=1
B0=0
eta0=0
eps0=0
# r is evaluated now
r=R0*eta0*eps0
# Handle specific bluntness cases
if B0 == 0:
    x=R0/2*(1+eps0**2-eta0**2)
elif B0 == 1:
    # NOTE(review): this uses sqrt(1-eps0)**2*eta0 while update() below uses
    # sqrt(1-eps**2*eta) for the same B == 1 case -- one of the two is
    # presumably wrong; confirm against Van Dyke's paper.
    x=R0*(1-np.sqrt(1-eps0)**2*eta0)
else:
    x=R0/B0*(np.sqrt(1-B0*np.square(eps0)*(1-B0+B0*np.square(eta0))))
# Figure/axes shared by the slider callbacks below.  NOTE(review): `global`
# at module level is a no-op; kept for byte-compatibility.
global fig, ax
fig, ax = plt.subplots()
plt.subplots_adjust(left=0.25, bottom=0.30)
global l
l, = plt.plot(x,r,'go')  # the current (x, r) point marker
global circ
# reference circle of the current radius, centred at (R, 0)
circ=mpatches.Circle((R0, 0), R0, color='b', fill=False)
ax.add_patch(circ)
ax.grid(linestyle='-')
plt.xlabel('x')
plt.ylabel('r')
ax.set_xlim(-1,1)
ax.set_ylim(-1,1)
ax.set_aspect('equal',adjustable='box')
# Make parameter sliders and locate them nicely
axcolor = 'lightgoldenrodyellow'
axeta = plt.axes([0.25, 0.16, 0.65, 0.03], facecolor=axcolor)
axeps = plt.axes([0.25, 0.11, 0.65, 0.03], facecolor=axcolor)
axR = plt.axes([0.25, 0.06, 0.65, 0.03])
axB = plt.axes([0.25, 0.01, 0.65, 0.03])
sR = Slider(axR, 'R', 0.01, 1, valinit=R0)
sB = Slider(axB, 'B', 0, 1, valinit=B0)
seta = Slider(axeta, 'eta', -1, 1, valinit=eta0)
seps = Slider(axeps, 'eps', -1, 1, valinit=eps0)
# Define slider functions & provide trace functionality
def update(val):
    """Recompute (x, r) from the slider values and redraw marker + circle."""
    global x
    global r
    x_old = x
    r_old = r
    eta = seta.val
    eps = seps.val
    R = sR.val
    B = sB.val
    r = R*eta*eps
    l.set_ydata(r)
    if B == 0:
        x=R/2*(1+eps**2-eta**2)
    elif B == 1:
        # NOTE(review): differs from the B == 1 formula used at start-up
        # (sqrt(1-eps)**2*eta there vs sqrt(1-eps**2*eta) here) -- confirm
        # which one matches Van Dyke.
        x=R*(1-np.sqrt(1-eps**2*eta))
    else:
        x=R/B*(np.sqrt(1-B*eps**2*(1-B+B*eta**2)))
    l.set_xdata(x)
    global trace
    if trace == True:
        # draw a line segment from the previous point to the new one
        plt.sca(ax)
        ax.plot([x_old,x],[r_old,r],'-')
    global circ
    # replace the radius circle so it follows the R slider
    circ.remove()
    circ=mpatches.Circle((R, 0), R, color='b', fill=False)
    ax.add_patch(circ)
    fig.canvas.draw_idle()
seta.on_changed(update)
seps.on_changed(update)
sR.on_changed(update)
sB.on_changed(update)
# Make a trace button
traceax = plt.axes([0.08, 0.11, 0.1, 0.04])
trace_button = Button(traceax, 'Trace?', color=axcolor, hovercolor='0.975')
# `trace` is the boolean flag read by update().  The callback must NOT share
# its name: previously the callback was also called `trace`, so the def
# overwrote the flag with the function object and the first click merely set
# it to `not <function>` == False -- tracing only started on the second
# click.
trace = False
def toggle_trace(event):
    """Flip the global trace flag when the Trace? button is clicked."""
    global trace
    trace = not trace
trace_button.on_clicked(toggle_trace)
# Make reset button
# Only resets inital params! Not window!
resetax = plt.axes([0.08, 0.01, 0.1, 0.04])
reset_button = Button(resetax, 'Reset', color=axcolor, hovercolor='0.975')
def reset(event):
    """Restore every slider to its initial value (each reset fires
    on_changed, which redraws via update())."""
    sR.reset()
    sB.reset()
    seta.reset()
    seps.reset()
reset_button.on_clicked(reset)
plt.show()
# Faça um programa que calcule a área de um triângulo, cuja
# base e altura são fornecidas pelo usuário. Esse programa
# não pode permitir a entrada de dados inválidos, ou seja,
# medidas menores ou iguais a 0.
# (Compute the area of a triangle from user-supplied base and height,
# rejecting measurements <= 0 as the exercise requires.)
def _ler_medida_positiva(prompt):
    """Prompt repeatedly until the user enters a value greater than zero."""
    valor = float(input(prompt))
    while valor <= 0:
        # the exercise forbids values <= 0, so ask again
        valor = float(input(prompt))
    return valor

base = _ler_medida_positiva("Informe a base do triangulo: ")
altura = _ler_medida_positiva("Informe a altura do triangulo: ")
area = (base * altura) / 2
# The original script computed the area but never displayed it.
print("A area do triangulo e: {:.2f}".format(area))
import beatsaver
def test_getMapFromID():
    """Unknown map keys resolve to None; known keys return the map."""
    doesNotExist = beatsaver.maps.get_map_from_id('99999')  # map does not exist
    # PEP 8: compare against None with `is`, not `==`.
    assert doesNotExist is None
    machinegun = beatsaver.maps.get_map_from_id('9e5c')
    assert machinegun.uploader.name == 'de125'
def test_getMapFromHash():
    """Look up a map by its content hash and verify the uploader."""
    ov_hash = "f402008042efaca4291a6633ebb6b562e4adcd87"  # this is the ov hash lol
    sacrament = beatsaver.maps.get_map_from_hash(ov_hash)
    assert sacrament.uploader.name == "rogdude"
def test_getMapsFromUser():
    """A known uploader's map list should resolve to something."""
    user = beatsaver.extras.get_user_from_username('megamaz')
    megamaps = beatsaver.maps.get_maps_from_user(user.id)
    # PEP 8: compare against None with `is not`, not `!=`.
    assert megamaps is not None
def test_getLatestMaps():
    """The latest-maps feed (auto-mapper results excluded) must resolve."""
    latest = beatsaver.maps.get_latest_maps(False)
    # PEP 8: compare against None with `is not`, not `!=`.
    assert latest is not None
def test_getMostPlayedMaps():
    """The most-played feed must resolve."""
    mostplayed = beatsaver.maps.get_most_played_maps()
    # PEP 8: compare against None with `is not`, not `!=`.
    assert mostplayed is not None
def test_getUserFromUsername():
    """Resolve a username to a user object and check the round trip."""
    resolved = beatsaver.extras.get_user_from_username('megamaz')
    assert resolved.name == 'megamaz'
import speech_recognition as sr
import pyaudio
# obtain audio from the microphone
from CommandConverters import commandparser
r = sr.Recognizer()
# Energy level above which audio counts as speech and listening continues.
# (Original comment, translated from Chinese: "set how much energy is
# needed before it keeps listening".)
r.energy_threshold = 450
#r.dynamic_energy_threshold = False
audio = None
with sr.Microphone() as source:
    print("current energy is {}".format(r.energy_threshold))
    #print("current threshold is {}".format(r.threshold))
    print("Say something!")
    #r.adjust_for_ambient_noise(source)
    # stop waiting after 2s of silence before speech starts
    audio = r.listen(source,timeout=2)
try:
    # Recognize Mandarin speech offline with CMU Sphinx using the project's
    # 'speechcommand2' grammar, then dispatch the text as a command.
    #print("Sphinx thinks you said " + str(r.recognize_sphinx(audio,language="zh-CN",grammar='speechcommand'))) # grammar='speechcommand.fsg'
    #raw_recog_str = str(r.recognize_sphinx(audio,language="zh-CN",grammar='speechcommand'))
    #print("raw recognized string:"+str(r.recognize_sphinx(audio,language="zh-CN",grammar='speechcommand')))
    commandparser.Sent2Command(str(r.recognize_sphinx(audio,language="zh-CN",grammar='speechcommand2')))
except sr.UnknownValueError:
    print("Sphinx could not understand audio")
except sr.RequestError as e:
    print("Sphinx error; {0}".format(e))
|
import os
import random
import numpy as np
import cv2
# RGB palette of the 21 Pascal VOC segmentation classes (background first);
# a pixel's class index is the position of its color in this list.
voc_colormap = [
    [0, 0, 0],
    [128, 0, 0],
    [0, 128, 0],
    [128, 128, 0],
    [0, 0, 128],
    [128, 0, 128],
    [0, 128, 128],
    [128, 128, 128],
    [64, 0, 0],
    [192, 0, 0],
    [64, 128, 0],
    [192, 128, 0],
    [64, 0, 128],
    [192, 0, 128],
    [64, 128, 128],
    [192, 128, 128],
    [0, 64, 0],
    [128, 64, 0],
    [0, 192, 0],
    [128, 192, 0],
    [0, 64, 128],
]
def DataGenerator(voc_path, batch_size, split):
    """Yield (image, mask) batches for the Pascal VOC segmentation set.

    voc_path:   root of the VOC year directory (contains ImageSets/,
                JPEGImages/ and SegmentationClass/).
    batch_size: number of samples per yielded batch (last batch may be
                smaller).
    split:      one of "test", "train", "val" -- selects the image list.

    Yields (x, y) where x is (N, 224, 224, 3) float32 RGB images and y is
    (N, 224, 224) float32 class-index masks.
    """
    assert split in ["test", "train", "val"]
    label_path = os.path.join(voc_path, "ImageSets", "Segmentation", split + ".txt")
    with open(label_path) as f:
        lines = f.readlines()
    random.shuffle(lines)

    def extract_raw_image(image_name):
        # Load, resize to the fixed network input size and convert BGR->RGB.
        image_path = os.path.join(voc_path, "JPEGImages", image_name + ".jpg")
        image = cv2.imread(image_path)
        image = cv2.resize(image, (224, 224), interpolation=cv2.INTER_NEAREST)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        return image

    def extract_mask(image_name):
        # Decode the color-coded label image into a (224, 224) int32 array
        # of class indices.  Colors not in voc_colormap (e.g. the 'void'
        # border) fall back to class 0, matching the original behaviour.
        mask_path = os.path.join(voc_path, "SegmentationClass", image_name + ".png")
        mask_image = cv2.imread(mask_path)
        mask_image = cv2.resize(mask_image, (224, 224), interpolation=cv2.INTER_NEAREST)
        mask_image = cv2.cvtColor(mask_image, cv2.COLOR_BGR2RGB)
        mask = np.zeros(mask_image.shape[:-1], dtype=np.int32)
        # Vectorized color -> index mapping: one boolean comparison per
        # class instead of a per-pixel Python loop with list.index(),
        # which was O(H * W * n_classes) in interpreted code.
        for index, color in enumerate(voc_colormap):
            mask[np.all(mask_image == np.asarray(color), axis=-1)] = index
        return mask

    annotation_len = len(lines)
    for i in range(0, annotation_len, batch_size):
        batch_lines = lines[i : min(annotation_len, i + batch_size)]
        image_names = [line.strip() for line in batch_lines]
        x = [extract_raw_image(image_name) for image_name in image_names]
        y = [extract_mask(image_name) for image_name in image_names]
        x, y = (
            np.stack(x, axis=0).astype(dtype=np.float32),
            np.stack(y, axis=0).astype(dtype=np.float32),
        )
        yield x, y
|
from channels.routing import ProtocolTypeRouter
from django.urls import re_path
from . import consumers
# WebSocket URL table consumed by the project's ProtocolTypeRouter.
# NOTE(review): passing the consumer class directly works on Channels 2.x;
# Channels 3+ requires consumers.VNCConsumer.as_asgi() here -- confirm the
# installed Channels version.
websocket_urlpatterns = [
    re_path(r'/websocket', consumers.VNCConsumer)
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenOperationBizfeeAftechRefundModel(object):
    """Value object for the aftech bizfee refund request payload.

    Every payload field is exposed as a read/write property backed by a
    private attribute, and the object round-trips to/from the dict layout
    used by the Alipay OpenAPI gateway.
    """

    # Payload field names, in API order; drives __init__ and serialization.
    _FIELDS = ('app_name', 'currency', 'fee_order_no', 'gmt_service',
               'order_no', 'out_biz_no', 'refund_amount', 'refund_no',
               'tnt_inst_id')

    def __init__(self):
        # Every backing attribute starts out unset.
        for field in self._FIELDS:
            setattr(self, '_' + field, None)

    # Build one read/write property per field (equivalent to the usual
    # hand-written @property/@x.setter pairs).
    def _field_property(field):
        attr = '_' + field

        def _get(self):
            return getattr(self, attr)

        def _set(self, value):
            setattr(self, attr, value)

        return property(_get, _set)

    app_name = _field_property('app_name')
    currency = _field_property('currency')
    fee_order_no = _field_property('fee_order_no')
    gmt_service = _field_property('gmt_service')
    order_no = _field_property('order_no')
    out_biz_no = _field_property('out_biz_no')
    refund_amount = _field_property('refund_amount')
    refund_no = _field_property('refund_no')
    tnt_inst_id = _field_property('tnt_inst_id')
    del _field_property  # class-body helper only; keep it off the class

    def to_alipay_dict(self):
        """Serialize all truthy fields into the API dict representation.

        Nested objects exposing to_alipay_dict() are serialized
        recursively; unset/falsy fields are omitted.
        """
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[field] = value.to_alipay_dict()
            else:
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; falsy input yields None."""
        if not d:
            return None
        o = AlipayOpenOperationBizfeeAftechRefundModel()
        for field in AlipayOpenOperationBizfeeAftechRefundModel._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
|
from ImageLibrary import utils
from ImageLibrary.image_processor import ImageProcessor, get_image_from_config
def _get_threshold(info, threshold):
if threshold is not None:
return threshold
else:
return info[1]
class Template:
    """A named screen template backed by one image or a list of images.

    Each config entry is resolved by get_image_from_config() into a tuple
    whose first element is the image and whose second element is the
    default matching threshold; all screen operations are delegated to
    ImageProcessor.
    """

    @utils.add_error_info
    def __init__(self, name, config):
        self.name = name
        self.images = None #if there are multiple images
        self.image = None #if this is one image
        if isinstance(config, list):
            self.images = []
            for entry in config:
                self.images.append(get_image_from_config(entry))
        else:
            self.image = get_image_from_config(config)

    def _get_template_image(self, index):
        """Return the (image, threshold) tuple selected by *index*.

        Multi-image templates use a 1-based index (required); single-image
        templates ignore the index entirely.
        """
        if self.images is not None:
            index = int(index)
            assert index != -1, "Template {} must be reached by index, but it isn't set".format(self.name)
            #assert index > 0, "Index must by more that zero"
            assert index <= len(self.images), "{} has only {} elements, index is to big".format(self.name, len(self.images))
            return self.images[index - 1]
        return self.image

    @utils.add_error_info
    def find_template(self, index=-1, threshold=None, cache=False, zone=None):
        """Locate the template on screen and return the match result."""
        image = self._get_template_image(index)
        return ImageProcessor().find_image(image[0], _get_threshold(image, threshold), cache, zone)

    @utils.add_error_info
    def is_template_on_screen(self, index=-1, threshold=None, cache=False, zone=None):
        """Boolean check: is the template currently visible?"""
        image = self._get_template_image(index)
        return ImageProcessor().is_image_on_screen(image[0], _get_threshold(image, threshold), cache, zone)

    @utils.add_error_info
    def template_should_be_on_screen(self, index=-1, threshold=None, cache=False, zone=None):
        """Assert-style check that the template is visible."""
        image = self._get_template_image(index)
        return ImageProcessor().image_should_be_on_screen(image[0], _get_threshold(image, threshold), cache, zone)

    @utils.add_error_info
    def template_should_not_be_on_screen(self, index=-1, threshold=None, cache=False, zone=None):
        """Assert-style check that the template is NOT visible."""
        image = self._get_template_image(index)
        return ImageProcessor().image_should_not_be_on_screen(image[0], _get_threshold(image, threshold), cache, zone)

    @utils.add_error_info
    def wait_for_template(self, index=-1, threshold=None, timeout=15, zone=None):
        """Block until the template appears or *timeout* seconds elapse."""
        image = self._get_template_image(index)
        return ImageProcessor().wait_for_image(image[0], _get_threshold(image, threshold), timeout, zone)

    @utils.add_error_info
    def wait_for_template_to_hide(self, index=-1, threshold=None, timeout=15, zone=None):
        """Block until the template disappears or *timeout* seconds elapse."""
        image = self._get_template_image(index)
        return ImageProcessor().wait_for_image_to_hide(image[0], _get_threshold(image, threshold), timeout, zone)

    @utils.add_error_info
    def wait_for_template_to_stop(self, index=-1, threshold=None, timeout=15, move_threshold=0.99, step=0.1):
        """Block until the template stops moving on screen."""
        image = self._get_template_image(index)
        return ImageProcessor().wait_for_image_to_stop(image[0], _get_threshold(image, threshold), timeout, move_threshold, step)

    @utils.add_error_info
    def get_templates_count(self, index=-1, threshold=None, cache=False, zone=None):
        """Count how many times the template appears on screen."""
        image = self._get_template_image(index)
        return ImageProcessor().get_images_count(image[0], _get_threshold(image, threshold), cache, zone)

    @utils.add_error_info
    def is_template_in_zone(self, index=-1, threshold=None, cache=False, zone=None):
        """Pass template as image to be found on screen in the given zone.
        Takes a screenshot of the passed area and find given data on the screenshot.
        Returns results for each argument."""
        image = self._get_template_image(index)
        return ImageProcessor().is_template_in_zone(image[0], zone)
class ComplexTemplate(object):
    """A template composed of several part images that are searched
    together on screen.  Config must be a list of image configs, each
    resolved by get_image_from_config() into an (image, threshold) tuple.
    """

    def __init__(self, name, config):
        if not isinstance(config, list):
            raise AssertionError('Config corrupted for complex template {}: must be list of filenames, {} actually'.format(name, config))
        self.name = name
        # A second isinstance check (with an unreachable else raising
        # "must contain list of images") used to sit here; the guard above
        # already rejects non-lists, so it was dead code and was removed.
        self.images = [get_image_from_config(entry) for entry in config]

    @utils.add_error_info
    def is_complex_template_on_screen(self, threshold=None, cache=False, zone=None):
        """True only when every part image is currently on screen."""
        on_screen = True
        screen = ImageProcessor().get_screenshot() if not cache else None
        for image in self.images:
            on_screen &= ImageProcessor().is_image_on_screen(image[0], _get_threshold(image, threshold), cache, zone, screen)
            if not on_screen:
                break
        return on_screen

    @utils.add_error_info
    def is_any_part_of_complex_template_on_screen(self, threshold=None, cache=False, zone=None):
        """True when at least one part image is currently on screen."""
        screen = ImageProcessor().get_screenshot() if not cache else None
        for image in self.images:
            if ImageProcessor().is_image_on_screen(image[0], _get_threshold(image, threshold), cache, zone, screen):
                return True
        return False

    @utils.add_error_info
    def wait_for_complex_template(self, threshold=None, timeout=15, zone=None):
        """Sequentially wait for every part image to appear; returns the
        first falsy wait result, or the last result when all succeed."""
        waiting_result = False
        for image in self.images:
            waiting_result = ImageProcessor().wait_for_image(image[0], _get_threshold(image, threshold), timeout, zone)
            if not waiting_result:
                return waiting_result
        return waiting_result

    @utils.add_error_info
    def wait_for_complex_template_to_hide(self, threshold=None, timeout=15, zone=None):
        """Sequentially wait for every part image to disappear; returns the
        first falsy wait result, or the last result when all succeed."""
        waiting_result = False
        for image in self.images:
            waiting_result = ImageProcessor().wait_for_image_to_hide(image[0], _get_threshold(image, threshold), timeout, zone)
            if not waiting_result:
                return waiting_result
        return waiting_result
#todo
# @utils.add_error_info
# def get_template_position(self, template, threshold=0.99):
# """Returns template's coordinates after search. You can also specify the threshold value.
# Examples:
# | Get Template Position | template=img.png | threshold=${None}
# """
#
#
# threshold = float(threshold)
# screen = ImageProcessor()._screenshot()
# return MatchObjects().match_and_return_coordinates(template, screen, threshold)
#
#
# @utils.add_error_info
# def match_template_in_zone(self, template, zone, invert=False):
# screen = ImageProcessor()._get_screenshot(zone)
#
# if invert:
# screen = ScreenshotOperations().invert_image(screen)
#
# return MatchObjects().match_objects(template, screen)
#
# @utils.add_error_info
# def is_template_in_zone(self, template, zone):
#
# screen = ImageProcessor()._get_screenshot(zone)
# screen.save('scr.png')
# img = ImageProcessor().load_image(template)
#
# return MatchObjects().match_objects_with_knn(screen, img)
#
|
from .pylineblocks import splitIntoBlocks, reindentBlock
|
from netket import legacy as nk
import numpy as np
import jax
# NOTE(review): jax.experimental.optimizers was removed in newer JAX
# releases (moved to jax.example_libraries / optax) -- confirm the pinned
# jax version still provides this import.
from jax.experimental.optimizers import adam as Adam
# 1D Lattice
L = 20
g = nk.graph.Hypercube(length=L, n_dim=1, pbc=True)
# Hilbert space of spins on the graph
hi = nk.hilbert.Spin(s=1 / 2) ** L
# Transverse-field Ising Hamiltonian with field strength h = 1.0
ha = nk.operator.Ising(hilbert=hi, graph=g, h=1.0)
# Periodic matrix-product-state ansatz with bond dimension 4
ma = nk.machine.MPSPeriodic(
    hi, g, bond_dim=4, diag=False, symperiod=None, dtype=complex
)
ma.jax_init_parameters(seed=1232)
# Jax Sampler
sa = nk.sampler.MetropolisLocal(machine=ma, n_chains=2)
# Using native Jax Optimizers under the hood
op = nk.optimizer.jax.Wrap(ma, Adam(0.01))
# Stochastic-reconfiguration preconditioner for the natural gradient
sr = nk.optimizer.SR(ma, diag_shift=0.1)
# Create the optimization driver
gs = nk.Vmc(hamiltonian=ha, sampler=sa, optimizer=op, n_samples=1000, sr=sr)
# The first iteration is slower because of start-up jit times
gs.run(out="test", n_iter=1)
gs.run(out="test", n_iter=300)
|
import logging
import time
import traceback
from datetime import datetime
from threading import Thread
from typing import Optional
from waffles.api.abstract.handler import TaskManager
from waffles.commons.boot import CreateConfigFile
from waffles.commons.html import bold
from waffles.gems.web import get_icon_path
from waffles.gems.web.environment import EnvironmentUpdater
from waffles.gems.web.search import SearchIndexManager
from waffles.gems.web.suggestions import SuggestionsManager
from waffles.view.util.translation import I18n
class SuggestionsLoader(Thread):
    """Background thread that loads web application suggestions.

    Waits for the configuration-file task to finish, then either reads the
    cached suggestions (when offline or when a download is not due) or
    downloads fresh ones and persists them to disk. Progress is reported
    through ``taskman`` and the result is handed to ``suggestions_callback``.
    """

    def __init__(self, taskman: TaskManager, manager: SuggestionsManager,
                 i18n: I18n, logger: logging.Logger, suggestions_callback, create_config: CreateConfigFile,
                 internet_connection: bool, suggestions: Optional[dict] = None):
        super(SuggestionsLoader, self).__init__(daemon=True)
        self.taskman = taskman
        self.task_id = 'web_sugs'
        self.manager = manager
        self.suggestions_callback = suggestions_callback
        self.i18n = i18n
        self.logger = logger
        self.suggestions = suggestions
        self.create_config = create_config
        self.internet_connection = internet_connection
        self.task_name = self.i18n['web.task.suggestions']
        self.taskman.register_task(self.task_id, self.task_name, get_icon_path())

    def run(self):
        ti = time.time()
        # Wait for the configuration file before deciding how to load.
        self.taskman.update_progress(self.task_id, 0, self.i18n['task.waiting_task'].format(bold(self.create_config.task_name)))
        self.create_config.join()
        self.taskman.update_progress(self.task_id, 10, None)
        if not self.internet_connection:
            self.logger.warning("No internet connection. Only cached suggestions can be loaded")
            self.suggestions = self.manager.read_cached(check_file=True)
        elif not self.manager.should_download(self.create_config.config):
            self.suggestions = self.manager.read_cached(check_file=False)
        else:
            try:
                timestamp = datetime.utcnow().timestamp()
                self.suggestions = self.manager.download()
                if self.suggestions:
                    self.taskman.update_progress(self.task_id, 50, self.i18n['web.task.suggestions.saving'])
                    self.manager.save_to_disk(self.suggestions, timestamp)
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed; the download remains best-effort.
            except Exception:
                self.logger.error("Unexpected exception")
                traceback.print_exc()
        if self.suggestions_callback:
            self.taskman.update_progress(self.task_id, 75, None)
            try:
                self.suggestions_callback(self.suggestions)
            # Narrowed from a bare `except:` (see above); callback failures
            # must not prevent the task from finishing.
            except Exception:
                self.logger.error("Unexpected exception")
                traceback.print_exc()
        self.taskman.update_progress(self.task_id, 100, None)
        self.taskman.finish_task(self.task_id)
        tf = time.time()
        self.logger.info("Finished. Took {0:.4f} seconds".format(tf - ti))
class SearchIndexGenerator(Thread):
    """Background thread that builds the search index once suggestions load."""

    def __init__(self, taskman: TaskManager, idxman: SearchIndexManager, suggestions_loader: SuggestionsLoader, i18n: I18n, logger: logging.Logger):
        super(SearchIndexGenerator, self).__init__(daemon=True)
        self.taskman = taskman
        self.idxman = idxman
        self.i18n = i18n
        self.logger = logger
        self.suggestions_loader = suggestions_loader
        self.task_id = 'web_idx_gen'
        self.taskman.register_task(self.task_id, self.i18n['web.task.search_index'], get_icon_path())

    def run(self):
        started_at = time.time()
        waiting_msg = self.i18n['task.waiting_task'].format(bold(self.suggestions_loader.task_name))
        self.taskman.update_progress(self.task_id, 0, waiting_msg)
        # Block until the suggestions thread has produced its result.
        self.suggestions_loader.join()
        loaded = self.suggestions_loader.suggestions
        if loaded:
            self.taskman.update_progress(self.task_id, 1, None)
            self.logger.info('Indexing suggestions')
            generated_index = self.idxman.generate(loaded)
            if generated_index:
                self.taskman.update_progress(self.task_id, 50, self.i18n['web.task.suggestions.saving'])
                self.idxman.write(generated_index)
        self.taskman.update_progress(self.task_id, 100, None)
        self.taskman.finish_task(self.task_id)
        self.logger.info("Finished. Took {0:.4f} seconds".format(time.time() - started_at))
class UpdateEnvironmentSettings(Thread):
    """Background thread that refreshes environment settings after the
    configuration file has been created."""

    def __init__(self, env_updater: EnvironmentUpdater, taskman: TaskManager, i18n: I18n, create_config: CreateConfigFile):
        super(UpdateEnvironmentSettings, self).__init__(daemon=True)
        self.env_updater = env_updater
        self.taskman = taskman
        self.create_config = create_config
        self.task_id = env_updater.task_read_settings_id
        self.i18n = i18n

    def run(self):
        self.taskman.register_task(self.task_id, self.i18n['web.task.download_settings'], get_icon_path())
        waiting_msg = self.i18n['task.waiting_task'].format(bold(self.create_config.task_name))
        self.taskman.update_progress(self.task_id, 1, waiting_msg)
        self.create_config.join()
        web_config = self.create_config.config
        if not self.env_updater.should_download_settings(web_config):
            # Nothing to download: mark the task complete right away.
            self.taskman.update_progress(self.task_id, 100, None)
            self.taskman.finish_task(self.task_id)
        else:
            self.env_updater.read_settings(web_config=web_config, cache=False)
|
# from __future__ import print_function
# import csv
import numpy as np
import nltk
from nltk.corpus import wordnet as wn
from nltk import word_tokenize
# from nltk import FreqDist
from nltk.stem import WordNetLemmatizer
# import pandas
from nltk.corpus import stopwords
import string
# from string import maketrans
import sys
from scipy import spatial
from nltk.stem.porter import PorterStemmer
#Importing the Cosine Score Class for calculating the ranking
from CosineScore import *
###################### TF-IDF PROCESSOR #############################
class SearchQuery:
    '''
    This class provides the post-processing functionality for search:
    1. Load the precomputed tf-idf data from disk.
    2. Process the query into a boolean vector over the vocabulary.
    3. Rank documents by cosine similarity and return titles/URLs.
    '''
    ################ DATA ATTRIBUTES ######################
    queryVector = None   # boolean column vector for the last processed query
    tfidfMatrix = None   # precomputed tf-idf matrix (terms x documents)
    urlList = None       # document URLs, aligned with matrix columns
    titleList = None     # document titles, aligned with matrix columns
    #tfidf = None
    vocab = None         # vocabulary, aligned with matrix rows

    def __init__(self):
        # Load the precomputed tf-idf data produced by the indexer.
        tfidf = np.load('tfidf.npz')
        self.tfidfMatrix = tfidf['matrix']
        self.vocab = tfidf['vocab']
        self.urlList = tfidf['urls']
        self.titleList = tfidf['titles']

    def processQuery(self, word_bag, query_word_list):
        '''
        Encode a query as a boolean vector in the space of the word bag
        and store it in ``self.queryVector``.

        :param word_bag: the list of the unique words in our dictionary
        :param query_word_list: the list of the words in the query
        '''
        # Hash each vocabulary word to its row index.
        word_index = {word: i for i, word in enumerate(word_bag)}
        query_vector = np.zeros((len(word_index), 1))
        for term in query_word_list:
            # Membership test instead of the previous try/except-pass:
            # unknown terms are simply ignored.
            term_id = word_index.get(term)
            if term_id is not None:
                query_vector[term_id] = 1
        self.queryVector = query_vector

    ############################### HANDLER #################################
    def search(self, queryString, search_length=10, return_rank_list=False):
        '''
        Tokenize, normalize and rank a user query.

        :param queryString: raw query text.
        :param search_length: maximum number of results to return.
        :param return_rank_list: if True, return the raw rank list instead
            of (title, url) pairs.
        '''
        wn.ensure_loaded()
        stop_words = set(stopwords.words('english'))
        porter_stemmer = PorterStemmer()
        wordnet_lemmatizer = WordNetLemmatizer()
        query = word_tokenize(queryString)
        # Lowercase BEFORE the stop-word test; the previous order compared
        # the original-cased token against lowercase stop words and so let
        # capitalized stop words ("The", "A", ...) slip through.
        query = [w.lower() for w in query if w.isalpha()]
        query = [w for w in query if w not in stop_words]
        query = [wordnet_lemmatizer.lemmatize(w) for w in query]
        query = [porter_stemmer.stem(w) for w in query]
        self.processQuery(self.vocab, query)
        # Rank the pages for the processed query by cosine similarity.
        obj = CosineScore(self.queryVector, self.tfidfMatrix)
        rankList = obj.getPages(search_length)
        if return_rank_list:
            return rankList
        # Map document indices to (title, url) pairs.
        finalList = []
        for docIndex in rankList:
            finalList.append((self.titleList[docIndex], self.urlList[docIndex]))
        return (finalList)
#
# def main():
# obj = SearchQuery()
# obj.search('Andy')
#
# if __name__ == '__main__':
# main()
|
#!/usr/bin/env python3
import re
from collections import namedtuple
#: A named regex replacement rule: ``expression`` is substituted by
#: ``replacement`` during normalization.
Regex = namedtuple(
    "Regex",
    [
        "name",
        "expression",
        "replacement",
    ]
)
#: The default regex replacements used by :func:`gatenlphiltlab.normalize`
# All patterns use raw-string literals: sequences such as "\." and "\w" are
# invalid string escapes and raise warnings (errors in future Python); the
# pattern values are unchanged.
# NOTE(review): "left_single_quote" uses the same pattern as "file_names",
# which looks like a copy-paste error -- confirm the intended pattern.
# NOTE(review): the "file_names" rule is repeated seven times verbatim; the
# duplicates are preserved to keep behavior identical, but they look
# accidental and are probably safe to collapse to one entry.
regexes = (
    Regex(
        name="left_single_quote",
        expression=re.compile(r".*\.\w\w+.*?"),
        replacement="",
    ),
    Regex(
        name="file_names",
        expression=re.compile(r".*\.\w\w+.*?"),
        replacement="",
    ),
    Regex(
        name="file_names",
        expression=re.compile(r".*\.\w\w+.*?"),
        replacement="",
    ),
    Regex(
        name="file_names",
        expression=re.compile(r".*\.\w\w+.*?"),
        replacement="",
    ),
    Regex(
        name="file_names",
        expression=re.compile(r".*\.\w\w+.*?"),
        replacement="",
    ),
    Regex(
        name="file_names",
        expression=re.compile(r".*\.\w\w+.*?"),
        replacement="",
    ),
    Regex(
        name="file_names",
        expression=re.compile(r".*\.\w\w+.*?"),
        replacement="",
    ),
    Regex(
        name="file_names",
        expression=re.compile(r".*\.\w\w+.*?"),
        replacement="",
    ),
    Regex(
        name="speaker_tag",
        expression=re.compile(r"^.*?:", re.MULTILINE),
        replacement="",
    ),
    Regex(
        name="extralinguistic_tags",
        expression=re.compile(r"{.+?}"),
        replacement="",
    ),
    Regex(
        name="round_braces",
        expression=re.compile(r"[\(\)]"),
        replacement="",
    ),
    Regex(
        name="square_braces",
        expression=re.compile(r"[\[\]]"),
        replacement="",
    ),
    Regex(
        name="curly_braces",
        expression=re.compile(r"[{}]"),
        replacement="",
    ),
    Regex(
        name="tilde",
        expression=re.compile(r"~"),
        replacement="",
    ),
    Regex(
        name="backslash",
        expression=re.compile(r"\\"),
        replacement="",
    ),
    Regex(
        name="forward_slash",
        expression=re.compile(r"/"),
        replacement="",
    ),
    Regex(
        name="asterisk",
        expression=re.compile(r"\*"),
        replacement="",
    ),
    Regex(
        name="misc_characters",
        expression=re.compile(r"[\$\^\+@#`_=]|<>;"),
        replacement="",
    ),
    Regex(
        name="leading_spaces",
        expression=re.compile(r"^\s+?", re.MULTILINE),
        replacement="",
    ),
    Regex(
        name="trailing_spaces",
        expression=re.compile(r"\s+?$", re.MULTILINE),
        replacement="",
    ),
    Regex(
        name="extra_spaces",
        expression=re.compile(r"\s\s+?"),
        replacement=" ",
    ),
    Regex(
        name="crlf_newlines",
        expression=re.compile(r"\r\n"),
        replacement="\n",
    ),
    Regex(
        name="cr_newlines",
        expression=re.compile(r"\r"),
        replacement="\n",
    ),
    Regex(
        name="extra_newlines",
        expression=re.compile(r"\n\n+?"),
        replacement="\n",
    ),
)
|
from datetime import date, timedelta
from unittest import TestCase
from BankAccount import BankAccount, InsufficientFunds
# Fixed reference date used for all post-creation operations.
THIRTY_DAYS_AFTER_CREATION = date.today() + timedelta(days=30)


class TestBankAccount(TestCase):
    """Unit tests for BankAccount deposit, withdrawal and interest logic.

    Uses ``assertEqual``/``assertAlmostEqual`` instead of the deprecated
    ``assertEquals``/``assertAlmostEquals`` aliases (removed in Python 3.12).
    """

    def setUp(self):
        self.a = self.create_default_account()

    def test_creation(self):
        self.assert_account(100, date.today(), 0)
        self.assertEqual(0.0001, self.a.interestRate)

    def test_deposit(self):
        self.a.deposit(50, THIRTY_DAYS_AFTER_CREATION)
        self.assert_account(150, THIRTY_DAYS_AFTER_CREATION, 0.3)

    def test_withdraw(self):
        self.a.withdraw(50, THIRTY_DAYS_AFTER_CREATION)
        self.assert_account(50, THIRTY_DAYS_AFTER_CREATION, 0.3)
        # Withdrawing more than the balance must raise.
        with self.assertRaises(InsufficientFunds):
            self.a.withdraw(200, THIRTY_DAYS_AFTER_CREATION)

    def test_credit_interest(self):
        self.a.deposit(50, THIRTY_DAYS_AFTER_CREATION)
        self.a.credit_interest()
        self.assert_account(150.3, THIRTY_DAYS_AFTER_CREATION, 0)

    def test_updateInterestEarned(self):
        self.a.updateInterestEarned(THIRTY_DAYS_AFTER_CREATION)
        self.assert_account(100, THIRTY_DAYS_AFTER_CREATION, 0.3)

    def create_default_account(self):
        """Return a fresh account with 100.0 balance and 0.01% daily rate."""
        accountHolder = "Riccardo"
        accountNumber = 100000
        return BankAccount(accountHolder, accountNumber, 100.0, date.today(), 0.0001)

    def assert_account(self, expected_balance, expected_day_last_op, expected_interest_earned):
        """Assert balance, interest earned and last-operation date at once."""
        self.assertAlmostEqual(expected_balance, self.a.balance)
        self.assertAlmostEqual(expected_interest_earned, self.a.interestEarned)
        self.assertEqual(expected_day_last_op, self.a.dayLastOp)
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: pyide.py
# Purpose:
#
# Author: wukan
#
# Created: 2019-01-10
# Copyright: (c) wukan 2019
# Licence: GPL-3.0
#-------------------------------------------------------------------------------
from noval import _,consts,NewId
from tkinter import messagebox
import noval.ide as ide
import noval.python.interpreter.interpretermanager as interpretermanager
from noval.util import strutils
from noval.util import utils
import noval.constants as constants
import noval.model as model
import os
import sys
import noval.syntax.lang as lang
import noval.ui_utils as ui_utils
import subprocess
import noval.util.fileutils as fileutils
import noval.terminal as terminal
import noval.ui_common as ui_common
import noval.misc as misc
import noval.python.debugger.debugger as pythondebugger
import noval.running as running
class PyIDEApplication(ide.IDEApplication):
    """Python-specific IDE application.

    Extends the generic IDEApplication with interpreter management, Python
    debugging commands, Python lexer templates and Python-only plugins.
    """
    def __init__(self):
        ide.IDEApplication.__init__(self)
    def OnInit(self):
        # Initialize the base IDE first; abort startup if that fails.
        if not ide.IDEApplication.OnInit(self):
            return False
        # Must use a relative import here: the search path has already been
        # appended, and importing via the long path would create an
        # IntellisenceManager instance different from the one used elsewhere.
        import intellisence
        from noval.project.document import ProjectDocument
        self._debugger_class = pythondebugger.PythonDebugger
        # Forbid adding pyc/pyo binary file types to projects.
        ProjectDocument.BIN_FILE_EXTS = ProjectDocument.BIN_FILE_EXTS + ['pyc','pyo']
        # Interpreter selector combo box on the main toolbar.
        self.interpreter_combo = self.MainFrame.GetToolBar().AddCombox()
        self.interpreter_combo.bind("<<ComboboxSelected>>",self.OnCombo)
        if utils.is_windows():
            self.InsertCommand(consts.ID_FEEDBACK,constants.ID_OPEN_PYTHON_HELP,_("&Help"),_("&Python Help Document"),handler=self.OpenPythonHelpDocument,image=self.GetImage("pydoc.png"),pos="before")
        self.InsertCommand(consts.ID_CHECK_UPDATE,constants.ID_OPEN_DOCUMENTATION,_("&Help"),_("Product documentation"),handler=self.OpenDocumentation,pos="before",image=self.GetImage("documentation.png"))
        self.AddCommand(constants.ID_GOTO_DEFINITION,_("&Edit"),_("Goto Definition"),self.GotoDefinition,default_tester=True,default_command=True)
        self.InsertCommand(consts.ID_FEEDBACK,constants.ID_GOTO_PYTHON_WEB,_("&Help"),_("&Python Website"),handler=self.GotoPythonWebsite,pos="before")
        # Insert the interpreter menu item before the plugin-manager item.
        self.InsertCommand(consts.ID_PLUGIN,constants.ID_OPEN_INTERPRETER,_("&Tools"),_("&Interpreter"),self.OpenInterpreter,image=self.GetImage("python/interpreter.png"),pos="before")
        self.AddCommand(constants.ID_PREFERENCES,_("&Tools"),_("&Options..."),self.OnOptions,image=self.GetImage("prefer.png"),add_separator=True,\
                                separator_location="top")
        edit_menu = self.Menubar.GetMenu(_("&Edit"))
        insert_menu = edit_menu.GetMenu(constants.ID_INSERT)
        self.AddMenuCommand(constants.ID_INSERT_DECLARE_ENCODING,insert_menu,_("Insert Encoding Declare"),self.InsertEncodingDeclare,default_tester=True,default_command=True)
        self.AddCommand(constants.ID_START_WITHOUT_DEBUG,_("&Run"),_("&Start Without Debugging"),self.RunWithoutDebug,default_tester=True,default_command=True)
        self.AddCommand(constants.ID_SET_EXCEPTION_BREAKPOINT,_("&Run"),_("&Exceptions..."),self.SetExceptionBreakPoint,default_tester=True,default_command=True,add_separator=True)
        self.AddCommand(constants.ID_STEP_INTO,_("&Run"),_("&Step Into"),self.StepInto,default_tester=True,default_command=True,image=self.GetImage('python/debugger/step_into.png'))
        self.AddCommand(constants.ID_STEP_NEXT,_("&Run"),_("&Step Over"),self.StepNext,default_tester=True,default_command=True,add_separator=True,image=self.GetImage('python/debugger/step_next.png'))
        self.AddCommand(constants.ID_CHECK_SYNTAX,_("&Run"),_("&Check Syntax..."),self.CheckSyntax,default_tester=True,default_command=True)
        self.AddCommand(constants.ID_SET_PARAMETER_ENVIRONMENT,_("&Run"),_("&Set Parameter And Environment"),self.SetParameterEnvironment,default_tester=True,default_command=True,image=self.GetImage('python/debugger/runconfig.png'))
        self.AddCommand(constants.ID_RUN_LAST,_("&Run"),_("&Run Using Last Settings"),self.RunLast,default_tester=True,default_command=True)
        self.AddCommand(constants.ID_DEBUG_LAST,_("&Run"),_("&Debug Using Last Settings"),self.DebugLast,default_tester=True,default_command=True,add_separator=True)
        self.AddCommand(constants.ID_TOGGLE_BREAKPOINT,_("&Run"),_("&Toggle Breakpoint"),self.ToogleBreakPoint,default_tester=True,default_command=True,image=self.GetImage('python/debugger/breakpoint.png'))
        self.AddCommand(constants.ID_CLEAR_ALL_BREAKPOINTS,_("&Run"),_("&Clear All Breakpoints"),self.ClearAllBreakPoints,default_tester=True,default_command=False)
        # Close the application splash screen.
        self.CloseSplash()
        self.bind(constants.DOUBLECLICKPATH_EVT,self.request_focus_into,True)
        self.LoadDefaultInterpreter()
        self.AddInterpreters()
        # After the interpreters are loaded, send the message requesting the
        # installation of required plugins.
        self.event_generate("InstallRequiredPluginsMsg")
        self.intellisence_mananger = intellisence.IntellisenceManager()
        self.intellisence_mananger.generate_default_intellisence_data()
        return True
    def request_focus_into(self, event):
        '''
        When a folder is double-clicked in the file view, also change the
        working directory in the interpreter shell.
        '''
        path = event.get('path')
        if not os.path.isdir(path) or not utils.profile_get_int("ExecuateCdDoubleClick", True):
            return
        self.MainFrame.ShowView(consts.PYTHON_INTERPRETER_VIEW_NAME,toogle_visibility_flag=True)
        shell = self.MainFrame.GetCommonView(consts.PYTHON_INTERPRETER_VIEW_NAME)
        proxy = shell.Runner.get_backend_proxy()
        # Only switch directories when the backend works on the local
        # filesystem, is not already in `path` and is idle at the toplevel.
        if (
            proxy
            and proxy.uses_local_filesystem()
            and proxy.get_cwd() != path
            and shell.Runner.is_waiting_toplevel_command()
        ):
            shell.submit_magic_command(running.construct_cd_command(path))
    def GetInterpreterManager(self):
        """Return the (singleton-style) Python interpreter manager."""
        return interpretermanager.InterpreterManager()
    def SetExceptionBreakPoint(self):
        self.MainFrame.GetView(consts.BREAKPOINTS_TAB_NAME).SetExceptionBreakPoint()
    def StepNext(self):
        self.GetDebugger().StepNext()
    def StepInto(self):
        self.GetDebugger().StepInto()
    def DebugLast(self):
        self.GetDebugger().DebugLast()
    def RunLast(self):
        self.GetDebugger().RunLast()
    def CheckSyntax(self):
        self.GetDebugger().CheckScript()
    def SetParameterEnvironment(self):
        self.GetDebugger().SetParameterAndEnvironment()
    def ToogleBreakPoint(self):
        current_view = self.GetDocumentManager().GetCurrentView()
        # Breakpoints can only be toggled in Python file views.
        if current_view is None or not hasattr(current_view,"ToogleBreakpoint"):
            return
        current_view.GetCtrl().ToogleBreakpoint()
    def ClearAllBreakPoints(self):
        self.MainFrame.GetView(consts.BREAKPOINTS_TAB_NAME).ClearAllBreakPoints()
    @misc.update_toolbar
    def LoadDefaultInterpreter(self):
        interpretermanager.InterpreterManager().LoadDefaultInterpreter()
    def GotoDefinition(self):
        current_view = self.GetDocumentManager().GetCurrentView()
        # Goto-definition is only available for Python file views.
        if current_view is None or not hasattr(current_view,"GotoDefinition"):
            return
        current_view.GotoDefinition()
    def LoadDefaultPlugins(self):
        '''
        Load the default Python plugins.
        '''
        import noval.preference as preference
        import noval.python.interpreter.gerneralconfiguration as interpretergerneralconfiguration
        import noval.python.interpreter.interpreterconfigruation as interpreterconfigruation
        import noval.keybinds as keybinds
        ide.IDEApplication.LoadDefaultPlugins(self)
        # Add the Python-only preference panels, before the "other" panel.
        preference.PreferenceManager().AddOptionsPanelClass(preference.INTERPRETER_OPTION_NAME,preference.GENERAL_ITEM_NAME,interpretergerneralconfiguration.InterpreterGeneralConfigurationPanel)
        preference.PreferenceManager().AddOptionsPanelClass(preference.INTERPRETER_OPTION_NAME,preference.INTERPRETER_CONFIGURATIONS_ITEM_NAME,interpreterconfigruation.InterpreterConfigurationPanel)
        # A slash cannot be used as the separator here: slash is the default
        # data separator; "|" is displayed as "/" in the UI.
        preference.PreferenceManager().AddOptionsPanelClass("Debug|Run","Debug",pythondebugger.DebuggerOptionsPanel)
        preference.PreferenceManager().AddOptionsPanelClass("Debug|Run","Output",pythondebugger.OutputOptionsPanel)
        preference.PreferenceManager().AddOptionsPanelClass("Debug|Run","Run",pythondebugger.RunOptionsPanel)
        preference.PreferenceManager().AddOptionsPanelClass("Misc","KeyBindings",keybinds.KeybindOptionPanel)
        consts.DEFAULT_PLUGINS += ("noval.python.project.browser.ProjectViewLoader",)
        consts.DEFAULT_PLUGINS += ('noval.python.plugins.pyshell.pyshell.PyshellViewLoader',)
        # The window panel precedes the outline panel, so it must be
        # initialized before the outline plugin.
        consts.DEFAULT_PLUGINS += ('noval.plugins.windowservice.WindowServiceLoader',)
        consts.DEFAULT_PLUGINS += ('noval.python.plugins.outline.PythonOutlineViewLoader',)
        consts.DEFAULT_PLUGINS += ('noval.python.project.viewer.DefaultProjectTemplateLoader',)
        consts.DEFAULT_PLUGINS += ('noval.python.plugins.unittest.UnittestLoader',)
        consts.DEFAULT_PLUGINS += ('noval.python.debugger.watchs.WatchsViewLoader',)
        consts.DEFAULT_PLUGINS += ('noval.python.debugger.breakpoints.BreakpointsViewLoader',)
        consts.DEFAULT_PLUGINS += ('noval.python.debugger.stacksframe.StackframeViewLoader',)
        consts.DEFAULT_PLUGINS += ('noval.python.debugger.inspectconsole.InspectConsoleViewLoader',)
        consts.DEFAULT_PLUGINS += ('noval.python.plugins.pip_gui.PluginManagerGUI',)
    def CreateLexerTemplates(self):
        from noval.syntax import synglob
        # Append the parser path so its modules can be imported with a
        # relative path.
        parser_path = os.path.join(utils.get_app_path(),"noval","python","parser")
        sys.path.append(parser_path)
        ide.IDEApplication.CreateLexerTemplates(self)
        synglob.LexerFactory().CreateLexerTemplates(self.GetDocumentManager(),model.LANGUAGE_PYTHON)
    def GetCurrentInterpreter(self):
        """Return the currently selected Python interpreter (or None)."""
        return interpretermanager.InterpreterManager().GetCurrentInterpreter()
    def Quit(self):
        if not self.AllowClose():
            return
        self.intellisence_mananger.Stop()
        ide.IDEApplication.Quit(self)
    @property
    def OpenProjectPath(self):
        return self._open_project_path
    def GetIDESplashBitmap(self):
        return os.path.join(utils.get_app_image_location(),"python/welcome.png")
    def AddInterpreters(self):
        """Populate the toolbar combo with interpreter names plus a final
        'Configuration' entry."""
        names = interpretermanager.InterpreterManager().GetInterpreterNames()
        names.append(_("Configuration"),)
        self.interpreter_combo['values'] = names
        self.SetCurrentInterpreter()
    def SetCurrentInterpreter(self):
        """Select the combo entry matching the current interpreter."""
        current_interpreter = interpretermanager.InterpreterManager().GetCurrentInterpreter()
        if current_interpreter is None:
            return
        for i in range(len(self.interpreter_combo['values'])):
            data = interpretermanager.InterpreterManager().interpreters[i]
            if data == current_interpreter:
                self.interpreter_combo.current(i)
                break
    @misc.update_toolbar
    def OnCombo(self,event):
        """Handle interpreter combo selection; the last entry opens the
        interpreter configuration page."""
        selection = self.interpreter_combo.current()
        prompt = False
        if selection == len(self.interpreter_combo['values']) - 1:
            # Configuration entry: not allowed while the debugger runs.
            if pythondebugger.BaseDebuggerUI.DebuggerRunning():
                prompt = True
            else:
                ui_common.ShowInterpreterConfigurationPage()
        else:
            interpreter = interpretermanager.InterpreterManager().interpreters[selection]
            if interpreter != self.GetCurrentInterpreter() and pythondebugger.BaseDebuggerUI.DebuggerRunning():
                prompt = True
            else:
                self.SelectInterpreter(interpreter)
        if prompt:
            messagebox.showinfo(self.GetAppName(),_("Please stop the debugger first!"),parent=self.GetTopWindow())
            self.SetCurrentInterpreter()
    def OpenDocumentation(self):
        fileutils.startfile("https://wekay.gitee.io/novalide")
    def OpenPythonHelpDocument(self):
        interpreter = self.GetCurrentInterpreter()
        if interpreter is None:
            return
        if interpreter.HelpPath == "":
            return
        fileutils.startfile(interpreter.HelpPath)
    def GotoPythonWebsite(self):
        fileutils.startfile("http://www.python.org")
    def SelectInterpreter(self,interpreter):
        """Make `interpreter` current and reload its intellisense data."""
        if interpreter != interpretermanager.InterpreterManager().GetCurrentInterpreter():
            interpretermanager.InterpreterManager().SetCurrentInterpreter(interpreter)
            if self.intellisence_mananger.IsRunning:
                return
            self.intellisence_mananger.load_intellisence_data(interpreter)
            # Whether to update the interpreter backend process when
            # switching interpreters.
            if utils.profile_get_int('UPDATE_SHELL_SWITCH_INTERPRETER',True):
                self.event_generate("UpdateShell")
    def GetDefaultLangId(self):
        return lang.ID_LANG_PYTHON
    def InsertCodingDeclare(self):
        pass
    def OpenInterpreter(self):
        """Launch the current interpreter in a console window."""
        interpreter = self.GetCurrentInterpreter()
        if interpreter is None:
            messagebox.showinfo(self.GetAppName(),_("No interpreter..."))
            return
        try:
            if utils.is_windows():
                fileutils.startfile(interpreter.Path)
            else:
                cmd_list = ['gnome-terminal','-x','bash','-c',interpreter.Path]
                subprocess.Popen(cmd_list,shell = False)
        except Exception as e:
            messagebox.showerror(_("Open Error"),_("%s") % str(e),parent=self.GetTopWindow())
    def OpenTerminator(self,filename=None):
        """Open a terminal at `filename`'s directory (or the cwd), embedding
        the current interpreter environment when configured to do so."""
        if filename:
            if os.path.isdir(filename):
                cwd = filename
            else:
                cwd = os.path.dirname(filename)
        else:
            cwd = os.getcwd()
        # Do not embed the interpreter environment when opening the terminal.
        if not utils.profile_get_int("EmbedInterpreterInterminator", True):
            ide.IDEApplication.OpenTerminator(self,filename)
            return
        interpreter = self.GetCurrentInterpreter()
        if interpreter is None:
            ide.IDEApplication.OpenTerminator(self,filename)
            return
        else:
            target_executable = interpreter.Path
        exe_dirs = interpreter.GetExedirs()
        env_overrides = {}
        env_overrides["PATH"] = ui_utils.get_augmented_system_path(exe_dirs)
        # Set the install-path environment variable; it must be removed from
        # the path when running programs.
        # Environment variables cannot be unicode in Python 2.7.
        env_overrides['MAIN_MODULE_APTH'] = str(utils.get_app_path())
        explainer = os.path.join(os.path.dirname(__file__), "explain_environment.py")
        cmd = [target_executable, explainer]
        # Detect whether this is a virtual-env interpreter.
        activate = os.path.join(os.path.dirname(target_executable),
                                "activate.bat" if utils.is_windows()
                                else "activate")
        if os.path.isfile(activate):
            del env_overrides["PATH"]
            if utils.is_windows():
                cmd = [activate, "&"] + cmd
            else:
                cmd = ["source", activate, ";"] + cmd
        return terminal.run_in_terminal(cmd, cwd, env_overrides, True)
    def RunWithoutDebug(self):
        self.GetDebugger().RunWithoutDebug()
    def InsertEncodingDeclare(self,text_view = None):
        """Insert (or replace, after confirmation) a coding declaration at
        the top of the current Python document."""
        if text_view is None:
            text_view = self.GetDocumentManager().GetCurrentView()
        lines = text_view.GetCtrl().GetTopLines(consts.ENCODING_DECLARE_LINE_NUM)
        coding_name,line_num = strutils.get_python_coding_declare(lines)
        if coding_name is not None:
            ret = messagebox.askyesno(_("Declare Encoding"),_("The Python Document have already declare coding,Do you want to overwrite it?"),parent=text_view.GetFrame())
            if ret == True:
                text_view.SetSelection(text_view.GetCtrl().PositionFromLine(line_num),text_view.GetCtrl().PositionFromLine(line_num+1))
                text_view.GetCtrl().DeleteBack()
            else:
                return True
        dlg = ui_utils.EncodingDeclareDialog(text_view.GetFrame())
        if dlg.ShowModal() == constants.ID_OK:
            text_view.GetCtrl().GotoPos(0,0)
            text_view.AddText(dlg.name_var.get() + "\n")
            return True
        return False
    def UpdateUI(self,command_id):
        """Return whether the menu/toolbar item for `command_id` is enabled."""
        if command_id == constants.ID_CLEAR_ALL_BREAKPOINTS:
            return 0 != len(self.MainFrame.GetView(consts.BREAKPOINTS_TAB_NAME).GetMasterBreakpointDict())
        current_project = self.MainFrame.GetProjectView(False).GetCurrentProject()
        current_interpreter = self.GetCurrentInterpreter()
        builtin_item_ids = [constants.ID_RUN,constants.ID_SET_EXCEPTION_BREAKPOINT,constants.ID_STEP_INTO,constants.ID_STEP_NEXT,constants.ID_RUN_LAST]
        all_item_ids = builtin_item_ids + [constants.ID_SET_PARAMETER_ENVIRONMENT,constants.ID_DEBUG_LAST,constants.ID_START_WITHOUT_DEBUG]
        # Disable run buttons and menus when using the built-in interpreter.
        if command_id in builtin_item_ids:
            if current_interpreter is None or current_interpreter.IsBuiltIn:
                return False
        elif command_id in all_item_ids:
            if current_interpreter is None:
                return False
        if current_project is not None:
            if command_id in all_item_ids:
                return True
        return ide.IDEApplication.UpdateUI(self,command_id)
|
# -*- coding: utf-8 -*-
"""
Data engine implementation based on lighting memory database
(http://symas.com/mdb/).
The Lmdb is initialized, the access needs to use its binding API, though.
Extension packages may provide higher-level APIs based on this.
"""
from __future__ import absolute_import, division, unicode_literals
import os
import logging
import lmdb
from ava.util import time_uuid
from ava.runtime import environ
from ava.spi.errors import DataNotFoundError, DataError
_DATA_FILE_DIR = b'data'
logger = logging.getLogger(__name__)
class Store(object):
    """Dict-like facade over one named LMDB sub-database.

    Every operation delegates to a cursor obtained from the owning engine.
    """

    def __init__(self, name, _db, _engine):
        self.name = name
        self._db = _db
        self._engine = _engine

    def __len__(self):
        # Number of entries, read from the database statistics.
        with self._engine.database.begin() as transaction:
            return transaction.stat(self._db)['entries']

    def __getitem__(self, key):
        with self._engine.cursor(self.name) as c:
            return c.get(key)

    def __setitem__(self, key, value):
        with self._engine.cursor(self.name, readonly=False) as c:
            c.put(key, value)

    def __delitem__(self, key):
        with self._engine.cursor(self.name, readonly=False) as c:
            c.remove(key)

    def __iter__(self):
        return self._engine.cursor(self.name).iternext()

    def put(self, key, value):
        """Store ``value`` under ``key``; returns the cursor's put result."""
        with self._engine.cursor(self.name, readonly=False) as c:
            return c.put(key, value)

    def get(self, key):
        """Return the value stored under ``key`` (None when absent)."""
        with self._engine.cursor(self.name, readonly=True) as c:
            return c.get(key)

    def remove(self, key):
        """Delete ``key``; returns the cursor's remove result."""
        with self._engine.cursor(self.name, readonly=False) as c:
            return c.remove(key)

    def cursor(self, readonly=True):
        """Open a fresh cursor over this store."""
        return self._engine.cursor(self.name, readonly=readonly)
class Cursor(object):
    """Context-managed wrapper around an LMDB cursor plus its transaction."""

    def __init__(self, _txn, _db, _readonly=True):
        self._txn = _txn
        self._db = _db
        self._readonly = _readonly
        self._cursor = lmdb.Cursor(_db, _txn)

    def __enter__(self, *args, **kwargs):
        self._txn.__enter__(*args, **kwargs)
        self._cursor.__enter__()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Close the cursor before finishing (commit/abort) the transaction.
        self._cursor.__exit__(exc_type, exc_val, exc_tb)
        self._txn.__exit__(exc_type, exc_val, exc_tb)

    def _as_bytes(self, key):
        # LMDB keys must be bytes; transparently encode text keys.
        # NOTE: `unicode` implies this module targets Python 2.
        if isinstance(key, unicode):
            key = key.encode('utf-8')
        return key

    def first(self):
        """Move to the first element; returns True if positioned."""
        return self._cursor.first()

    def next(self):
        """Move to the next element; returns True if positioned."""
        return self._cursor.next()

    def prev(self):
        """Move to the previous element; returns True if positioned."""
        return self._cursor.prev()

    def last(self):
        """Move to the last element; returns True if positioned."""
        return self._cursor.last()

    def iternext(self, keys=True, values=False):
        """Iterate forward from the current position.

        BUG FIX: the ``keys``/``values`` arguments were previously ignored
        and hard-coded to ``keys=True, values=False``; they are now passed
        through to the underlying LMDB cursor.
        """
        return self._cursor.iternext(keys=keys, values=values)

    def iterprev(self, keys=True, values=False):
        """Iterate backward from the current position.

        BUG FIX: same pass-through fix as :meth:`iternext`.
        """
        return self._cursor.iterprev(keys=keys, values=values)

    def close(self):
        self._cursor.close()

    def value(self):
        """
        Gets raw value of the record.
        :return: record's value.
        """
        return self._cursor.value()

    def key(self):
        """Return the key of the current record."""
        return self._cursor.key()

    def get(self, key):
        """Return the value stored under ``key``, or None when absent."""
        if not self._cursor.set_key(key):
            return None
        return self._cursor.value()

    def load(self, key):
        """
        Same as get method, except raising exception if entry not found.
        :param key: item key.
        :return: the value.
        :raises DataNotFoundError: when no entry exists for ``key``.
        """
        ret = self.get(key)
        if ret is None:
            raise DataNotFoundError()
        return ret

    def delete(self):
        """
        Delete the current element and move to the next.
        :return: True on success, False if the store was empty.
        """
        return self._cursor.delete(True)

    def remove(self, key):
        """
        Delete the entry stored under ``key``.
        :return: True on success, False if ``key`` was not found.
        """
        if not self._cursor.set_key(self._as_bytes(key)):
            return False
        return self._cursor.delete(True)

    def seek(self, key):
        """
        Finds the document with the provided ID and moves position to its first revision.
        :param key:
        :return: True if found; False, otherwise.
        """
        return self._cursor.set_key(self._as_bytes(key))

    def seek_range(self, key):
        """
        Finds the document whose ID is greater than or equal to the provided
        ID and moves position to its first revision.
        :param key:
        :return:
        """
        return self._cursor.set_range(key)

    def count(self):
        """
        Return the number of values ("duplicates") for the current key.
        Only meaningful for databases opened with dupsort=True.
        :return:
        """
        return self._cursor.count()

    def post(self, value):
        """Insert ``value`` under a fresh time-based UUID key; return the
        key on success, None otherwise."""
        key = time_uuid.utcnow().hex
        if self._cursor.put(key, value):
            return key
        return None

    def pop(self):
        """
        Fetch the first document then delete it. Returns None if no value
        existed.
        :return:
        """
        if self._cursor.first():
            return self._cursor.pop(self._cursor.key())

    def put(self, key, value):
        """Store ``value`` under ``key``; returns the LMDB put result."""
        return self._cursor.put(self._as_bytes(key), value)

    def exists(self, key):
        """Return True when an entry exists for ``key``."""
        if self._cursor.set_key(self._as_bytes(key)):
            return True
        return False
class DataEngine(object):
def __init__(self):
logger.debug("Initializing data engine...")
self.datapath = None
self.database = None
self.stores = {}
def start(self, ctx=None):
logger.debug("Starting data engine...")
# register with the context
ctx.bind('dataengine', self)
self.datapath = os.path.join(environ.pod_dir(), _DATA_FILE_DIR)
logger.debug("Data path: %s", self.datapath)
try:
self.database = lmdb.Environment(self.datapath, max_dbs=1024)
with self.database.begin(write=False) as txn:
cur = txn.cursor()
for k, v in iter(cur):
logger.debug("Found existing store: %s", k)
_db = self.database.open_db(k, create=False)
self.stores[k] = Store(k, _db, self)
except lmdb.Error:
logger.exception("Failed to open database.", exc_info=True)
raise
logger.debug("Data engine started.")
def stop(self, ctx=None):
logger.debug("Stopping data engine...")
if self.database:
self.database.close()
logger.debug("Data engine stopped.")
def store_names(self):
return self.stores.keys()
def create_store(self, name):
try:
_db = self.database.open_db(name, dupsort=False, create=True)
store = Store(name, _db, self)
self.stores[name] = store
return store
except lmdb.Error as ex:
logger.exception(ex)
raise DataError(ex.message)
def get_store(self, name, create=True):
result = self.stores.get(name)
if result is None and create:
return self.create_store(name)
return result
def remove_store(self, name):
try:
store = self.stores.get(name)
if store is not None:
with self.database.begin(write=True) as txn:
txn.drop(store._db)
del self.stores[name]
except lmdb.Error as ex:
logger.exception("Failed to remove store.", ex)
raise DataError(ex.message)
def remove_all_stores(self):
for name in self.stores.keys():
self.remove_store(name)
def store_exists(self, name):
return name in self.stores
def cursor(self, store_name, readonly=True):
_write = True
if readonly:
_write = False
_db = self.database.open_db(store_name, create=False, dupsort=True)
_txn = self.database.begin(write=_write, buffers=False)
return Cursor(_txn, _db, _readonly=readonly)
def stat(self):
    """Return statistics for the root LMDB database (entry counts, etc.)."""
    ret = self.database.stat()
    return ret
def __iter__(self):
    """Iterate over the registered store names.

    ``dict.iterkeys()`` does not exist in Python 3; ``iter(dict)``
    yields the keys on both Python 2 and 3.
    """
    return iter(self.stores)
def __getitem__(self, store_name):
    """Mapping-style access: ``engine[name]`` returns the Store, creating it on demand."""
    return self.get_store(store_name)
def __delitem__(self, store_name):
    """Mapping-style deletion: ``del engine[name]`` removes the store."""
    return self.remove_store(store_name)
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import config
from tempest import exceptions
import tempest.test
CONF = config.CONF
class BaseDataProcessingTest(tempest.test.BaseTestCase):
    """Base class for Sahara (data processing) API tests.

    Every resource created through the ``create_*`` helpers is tracked
    in a per-class list and deleted again in ``tearDownClass``.
    """

    _interface = 'json'

    @classmethod
    def setUpClass(cls):
        """Skip unless Sahara is available; set up the client and trackers."""
        super(BaseDataProcessingTest, cls).setUpClass()
        if not CONF.service_available.sahara:
            raise cls.skipException('Sahara support is required')
        cls.os = cls.get_client_manager()
        cls.client = cls.os.data_processing_client
        cls.flavor_ref = CONF.compute.flavor_ref

        # add lists for watched resources
        cls._node_group_templates = []
        cls._cluster_templates = []
        cls._data_sources = []
        cls._job_binary_internals = []
        cls._job_binaries = []
        cls._jobs = []

    @classmethod
    def tearDownClass(cls):
        """Delete all watched resources, dependents before dependencies.

        Cluster templates reference node group templates, and jobs
        reference job binaries, hence the deletion order below.
        getattr defaults guard against setUpClass having been skipped.
        """
        cls.cleanup_resources(getattr(cls, '_cluster_templates', []),
                              cls.client.delete_cluster_template)
        cls.cleanup_resources(getattr(cls, '_node_group_templates', []),
                              cls.client.delete_node_group_template)
        cls.cleanup_resources(getattr(cls, '_jobs', []), cls.client.delete_job)
        cls.cleanup_resources(getattr(cls, '_job_binaries', []),
                              cls.client.delete_job_binary)
        cls.cleanup_resources(getattr(cls, '_job_binary_internals', []),
                              cls.client.delete_job_binary_internal)
        cls.cleanup_resources(getattr(cls, '_data_sources', []),
                              cls.client.delete_data_source)
        cls.clear_isolated_creds()
        super(BaseDataProcessingTest, cls).tearDownClass()

    @staticmethod
    def cleanup_resources(resource_id_list, method):
        """Best-effort deletion: call *method* on each id, ignoring NotFound."""
        for resource_id in resource_id_list:
            try:
                method(resource_id)
            except exceptions.NotFound:
                # ignore errors while auto removing created resource
                pass

    @classmethod
    def create_node_group_template(cls, name, plugin_name, hadoop_version,
                                   node_processes, flavor_id,
                                   node_configs=None, **kwargs):
        """Creates watched node group template with specified params.

        It supports passing additional params using kwargs and returns created
        object. All resources created in this method will be automatically
        removed in tearDownClass method.
        """
        _, resp_body = cls.client.create_node_group_template(name, plugin_name,
                                                             hadoop_version,
                                                             node_processes,
                                                             flavor_id,
                                                             node_configs,
                                                             **kwargs)
        # store id of created node group template
        cls._node_group_templates.append(resp_body['id'])

        return resp_body

    @classmethod
    def create_cluster_template(cls, name, plugin_name, hadoop_version,
                                node_groups, cluster_configs=None, **kwargs):
        """Creates watched cluster template with specified params.

        It supports passing additional params using kwargs and returns created
        object. All resources created in this method will be automatically
        removed in tearDownClass method.
        """
        _, resp_body = cls.client.create_cluster_template(name, plugin_name,
                                                          hadoop_version,
                                                          node_groups,
                                                          cluster_configs,
                                                          **kwargs)
        # store id of created cluster template
        cls._cluster_templates.append(resp_body['id'])

        return resp_body

    @classmethod
    def create_data_source(cls, name, type, url, **kwargs):
        """Creates watched data source with specified params.

        It supports passing additional params using kwargs and returns created
        object. All resources created in this method will be automatically
        removed in tearDownClass method.
        """
        _, resp_body = cls.client.create_data_source(name, type, url, **kwargs)
        # store id of created data source
        cls._data_sources.append(resp_body['id'])

        return resp_body

    @classmethod
    def create_job_binary_internal(cls, name, data):
        """Creates watched job binary internal with specified params.

        It returns created object. All resources created in this method will
        be automatically removed in tearDownClass method.
        """
        _, resp_body = cls.client.create_job_binary_internal(name, data)
        # store id of created job binary internal
        cls._job_binary_internals.append(resp_body['id'])

        return resp_body

    @classmethod
    def create_job_binary(cls, name, url, extra=None, **kwargs):
        """Creates watched job binary with specified params.

        It supports passing additional params using kwargs and returns created
        object. All resources created in this method will be automatically
        removed in tearDownClass method.
        """
        _, resp_body = cls.client.create_job_binary(name, url, extra, **kwargs)
        # store id of created job binary
        cls._job_binaries.append(resp_body['id'])

        return resp_body

    @classmethod
    def create_job(cls, name, job_type, mains, libs=None, **kwargs):
        """Creates watched job with specified params.

        It supports passing additional params using kwargs and returns created
        object. All resources created in this method will be automatically
        removed in tearDownClass method.
        """
        _, resp_body = cls.client.create_job(name,
                                             job_type, mains, libs, **kwargs)
        # store id of created job
        cls._jobs.append(resp_body['id'])

        return resp_body
|
# -*- coding: utf-8 -*-
# python std lib
import sys
# djinja package imports
from djinja import cli
class TestCLI(object):
    """Tests for the djinja command line entry point."""

    def test_cli(self, tmpdir):
        """Verify docopt argument parsing through cli.main().

        Builds a template, output file and datasource on disk, fakes
        sys.argv, and checks that the Core object returned by
        cli.main() carries the expected argument keys.
        """
        template = tmpdir.join("Dockerfile.jinja")
        template.write("foobar")
        rendered = tmpdir.join("Dockerfile")
        datasource = tmpdir.join("datasource.py")
        datasource.write("#")

        sys.argv = [
            'scripts/dj',
            '-d', str(template),
            '-o', str(rendered),
            '-e', 'OS=ubuntu:12.04',
            '-s', str(datasource),
            '-vvvvv',
        ]

        expected_args = {
            '--dockerfile': str(template),
            '--env': ['OS=ubuntu:12.04'],
            '--help': False,
            '--outfile': str(rendered),
            '--quiet': False,
            '--verbosity': 5,
            '--version': False,
        }

        core = cli.main()
        for key in expected_args:
            assert key in core.args
        assert str(datasource) in core.args["--datasource"]
|
#!/usr/bin/env python
from walt.server.threads.main.snmp.base import Variant, VariantsSet, VariantProxy
from walt.server.threads.main.snmp.mibs import load_mib, unload_mib, get_loaded_mibs
# POWER-ETHERNET-MIB pethPsePortAdminEnable values.
POE_PORT_ENABLED=1
POE_PORT_DISABLED=2
POE_PORT_SPEEDS=(10**7, 10**8, 10**9) # 10Mb/s 100Mb/s 1Gb/s
# Per-host cache for the LLDP-port -> PoE-port mapping computed in
# get_poe_port_mapping() below.
POE_PORT_MAPPING_CACHE = {}
# In POWER-ETHERNET-MIB, PoE ports are identified using a
# tuple (grp_index, port_index).
# grp_index identifies a PoE port group (box in the stack,
# module in a rack, etc. or 1 for non-modular devices).
# port_index identifies the PoE port within this group.
# On the other hand, LLDP ports are identified using a global
# index meaningful for the whole switch.
# When we want to alter PoE state on a port, we have to compute
# the appropriate (grp_index, port_index) given the LLDP port index.
# The following solution works on our stacked switches:
# - list the switch ports with speed 10Mb/s 100Mb/s or 1Gb/s
#   (these are the possible speeds for a PoE port), using IF-MIB
# - list the PoE ports, using POWER-ETHERNET-MIB
# - verify that these 2 lists have the same length
# - "zip" these 2 lists in order to attach each LLDP port to its
#   corresponding PoE port identification tuple.
# On netgear 8-port switches, this fails because unconnected ports
# return a speed of 0 (and some other interfaces too (fiber, vlans)).
# In this case, we check if the list of ports with type
# 'ethernet CSMA/CD' is a continuous range. In this case,
# we can probably consider the PoE ports are enumerated
# the same way. IMPORTANT: This only works with switches where the
# PoE capable ports are listed first.
MSG_ISSUE_POE_PORT_MAPPING = """\
WalT could not guess how to correlate PoE ports and LLDP ports
on switch %s. Sorry."""
# IANAifType value for 'ethernetCsmacd' interfaces.
ETHERNETCSMACD = 6
def get_poe_port_mapping(snmp_proxy, host):
    """Return {lldp_port_index: (grp_index, port_index)} for *host*.

    Uses the heuristics described in the module comment above and
    caches the result per host. Raises RuntimeError when no reliable
    correlation between LLDP and PoE ports can be established.
    """
    if host not in POE_PORT_MAPPING_CACHE:
        if b"IF-MIB" not in get_loaded_mibs():
            load_mib(b"IF-MIB")
        # LLDP port indexes whose speed matches a possible PoE speed.
        iface_port_indexes = [
            int(k) for k, v in snmp_proxy.ifSpeed.items()
            if v in POE_PORT_SPEEDS]
        # LLDP port indexes of type 'ethernet CSMA/CD'.
        iface_type_indexes = [
            int(k) for k, v in snmp_proxy.ifType.items()
            if int(v) == ETHERNETCSMACD]
        # PoE ports as (grp_index, port_index) tuples.
        poe_port_indexes = [
            (int(grp_idx), int(grp_port))
            for grp_idx, grp_port in snmp_proxy.pethPsePortAdminEnable.keys()]
        # check if we have the same number of poe ports and 10/100/1000 ports
        if len(iface_port_indexes) == len(poe_port_indexes):
            # this probably means we can associate iface_port_indexes and
            # poe_port_indexes one by one:
            iface_to_poe_index = dict(zip(iface_port_indexes,
                                          poe_port_indexes))
        # otherwise, check if the ethernet port indexes form a continuous
        # range (no holes). The len() guard prevents max() from raising
        # ValueError on an empty generator when fewer than two ethernet
        # ports were found; in that case we fall through to the explicit
        # diagnostic below instead of crashing.
        elif len(iface_type_indexes) > 1 and \
                max(b - a for a, b in
                    zip(iface_type_indexes[:-1], iface_type_indexes[1:])) == 1:
            # this probably means we can associate iface_type_indexes and
            # poe_port_indexes one by one:
            iface_to_poe_index = dict(zip(iface_type_indexes,
                                          poe_port_indexes))
        else:
            raise RuntimeError(MSG_ISSUE_POE_PORT_MAPPING % host)
        POE_PORT_MAPPING_CACHE[host] = iface_to_poe_index
    return POE_PORT_MAPPING_CACHE[host]
class StandardPoE(Variant):
    """PoE control following the standard POWER-ETHERNET-MIB."""

    @classmethod
    def test_or_exception(cls, snmp_proxy):
        # Probe: raises if the device does not answer this MIB's table.
        list(snmp_proxy.pethPsePortAdminEnable.keys())

    @classmethod
    def load(cls):
        load_mib(b"POWER-ETHERNET-MIB")

    @classmethod
    def unload(cls):
        unload_mib(b"POWER-ETHERNET-MIB")

    @classmethod
    def check_poe_enabled(cls, snmp_proxy, port_mapping, switch_port):
        # True when PoE is administratively enabled on the port.
        poe_port = port_mapping[switch_port]
        return int(snmp_proxy.pethPsePortAdminEnable[poe_port]) == POE_PORT_ENABLED

    @classmethod
    def set_port(cls, snmp_proxy, port_mapping, switch_port, active_or_not):
        # Enable or disable PoE delivery on the given LLDP port.
        port_state = POE_PORT_ENABLED if active_or_not else POE_PORT_DISABLED
        poe_port = port_mapping[switch_port]
        snmp_proxy.pethPsePortAdminEnable[poe_port] = port_state

    @classmethod
    def check_poe_in_use(cls, snmp_proxy, port_mapping, switch_port):
        # True when the port is actually delivering power.
        poe_port = port_mapping[switch_port]
        return int(snmp_proxy.pethPsePortDetectionStatus[poe_port]) == 3 # 'deliveringPower'

    @classmethod
    def get_poe_port_mapping(cls, snmp_proxy, host):
        # Delegate to the module-level heuristic defined above.
        return get_poe_port_mapping(snmp_proxy, host)
# Netgear variant is the same as standard one except that the loaded MIB is not the same
class NetgearPoE(StandardPoE):
    """Standard PoE behavior, but loaded from the Netgear vendor MIB."""

    @classmethod
    def load(cls):
        load_mib(b"NETGEAR-POWER-ETHERNET-MIB")

    @classmethod
    def unload(cls):
        unload_mib(b"NETGEAR-POWER-ETHERNET-MIB")
# TP-link variant: does not follow POWER-ETHERNET-MIB; uses vendor MIBs.
class TPLinkPoE(Variant):
    """PoE control for TP-Link switches (vendor-specific TPLINK MIBs)."""

    @classmethod
    def test_or_exception(cls, snmp_proxy):
        # Probe: raises when the TP-Link PoE table is unavailable.
        dict(snmp_proxy.tpPoePortStatus)

    @classmethod
    def load(cls):
        load_mib(b"TPLINK-MIB")
        load_mib(b"TPLINK-POWER-OVER-ETHERNET-MIB")

    @classmethod
    def unload(cls):
        # Unload in reverse order of loading.
        unload_mib(b"TPLINK-POWER-OVER-ETHERNET-MIB")
        unload_mib(b"TPLINK-MIB")

    @classmethod
    def check_poe_enabled(cls, snmp_proxy, port_mapping, switch_port):
        # Status value 1 means PoE is enabled on the port.
        target = port_mapping[switch_port]
        return int(snmp_proxy.tpPoePortStatus[target]) == 1

    @classmethod
    def set_port(cls, snmp_proxy, port_mapping, switch_port, active_or_not):
        new_state = 1 if active_or_not else 0
        target = port_mapping[switch_port]
        snmp_proxy.tpPoePortStatus[target] = new_state

    @classmethod
    def check_poe_in_use(cls, snmp_proxy, port_mapping, switch_port):
        target = port_mapping[switch_port]
        return int(snmp_proxy.tpPoePowerStatus[target]) == 2 # 'on'

    @classmethod
    def get_poe_port_mapping(cls, snmp_proxy, host):
        # TP-Link uses one global index for both LLDP and PoE ports,
        # so the mapping is simply the identity.
        return {int(idx): int(idx) for idx in dict(snmp_proxy.tpPoePortStatus)}
# TP-link should be first, otherwise sending invalid requests when probing other variants
# seem to cause it to temporarily stop answering all requests (DoS mitigation?)
POE_VARIANTS = VariantsSet('PoE SNMP requests', (TPLinkPoE, StandardPoE, NetgearPoE))
class PoEProxy(VariantProxy):
    """Facade over the detected PoE variant for one switch.

    Variant detection happens in VariantProxy.__init__; the resolved
    variant is then used for all PoE operations on this host.
    """

    def __init__(self, snmp_proxy, host):
        VariantProxy.__init__(self, snmp_proxy, host, POE_VARIANTS)
        # Resolve the LLDP-port -> PoE-port mapping once, up front.
        self.port_mapping = self.variant.get_poe_port_mapping(snmp_proxy, host)

    def check_poe_enabled(self, switch_port):
        # True when PoE is administratively enabled on the port.
        return self.variant.check_poe_enabled(self.snmp, self.port_mapping, switch_port)

    def set_port(self, switch_port, active_or_not):
        # Enable (True) or disable (False) PoE on the port.
        self.variant.set_port(self.snmp, self.port_mapping, switch_port, active_or_not)

    def check_poe_in_use(self, switch_port):
        # True when the port is actually delivering power.
        return self.variant.check_poe_in_use(self.snmp, self.port_mapping, switch_port)
|
# -*- coding=utf8 -*-
"""thrift server"""
import socket
from .ev import server_run
from .const import LISTEN_BACKLOG
class ThriftServer(object):
    """Thrift server driven by the event loop in .ev.

    Attributes:
        service: thrift service
        handler: thrift handler
        sock (socket): listening socket, created by bind_and_listen()
    """

    def __init__(self, service, handler):
        self.sock = None
        self.service = service
        self.handler = handler

    def bind_and_listen(self, host, port, reuse_port):
        """Bind the listening socket and start listening."""
        self.sock = socket.socket()
        # The listening socket is non-blocking (accepts are event-driven).
        self.sock.setblocking(False)
        # Allow several processes to share the port, for easy scaling out.
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        if reuse_port:
            self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
        self.sock.bind((host, port))
        self.sock.listen(LISTEN_BACKLOG)

    def serve(self):
        """Run the event loop; the socket is always closed on exit."""
        try:
            server_run(self.sock, self.service, self.handler)
        finally:
            self.sock.close()
def make_server(service, handler, host, port, reuse_port=False):
    """Build a ThriftServer already bound and listening.

    Args:
        service: thrift service
        handler: thrift handler
        host (basestring): address to listen on
        port (int): port to listen on
        reuse_port (bool): allow several processes to share the port

    Returns:
        ThriftServer
    """
    srv = ThriftServer(service, handler)
    srv.bind_and_listen(host, port, reuse_port)
    return srv
|
# -*- coding: utf-8 -*-
"""
Tests for the model.
"""
__author__ = 'Yves-Noel Weweler <y.weweler@fh-muenster.de>'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.