content stringlengths 5 1.05M |
|---|
import numpy as np
import matplotlib.pyplot as plt
from . import SBM, LBM
from .utils import (
lbm_merge_group,
sbm_merge_group,
lbm_split_group,
sbm_split_group,
)
from typing import Any, Tuple, Union, Optional
from scipy.sparse import spmatrix
import scipy.sparse as sp
import logging
logger = logging.getLogger(__name__)
try:
import cupy
import cupyx
import GPUtil
_CUPY_INSTALLED = True
_DEFAULT_USE_GPU = True
except ImportError:
_CUPY_INSTALLED = False
_DEFAULT_USE_GPU = False
class ModelSelection:
    """
    Explore and select the optimal number of classes for the LBM or SBM model.
    The best model is chosen according to the Integrated Completed Likelihood.
    A strategy of merging and splitting classes to produce good initializations is used.
    Examples
    --------
    >>> lbm_model_selection = ModelSelection(
    ...     model_type="LBM",
    ...     plot=True,
    ... )
    >>> lbm_selected = lbm_model_selection.fit(graph)
    """

    def __init__(
        self,
        model_type: str,
        *,
        n_clusters_max: Optional[int] = 30,
        use_gpu: Optional[bool] = True,
        gpu_index: Optional[int] = None,
        plot: Optional[bool] = True,
    ) -> None:
        """
        Parameters
        ----------
        model_type : {'LBM', 'SBM'}
            The type of co-clustering model to use.
        n_clusters_max : int, optional, default: 30
            Upper limit of the number of classes.
        use_gpu : bool, optional, default: True
            Specify if a GPU should be used.
        gpu_index : int, optional, default: None
            Specify the gpu index if needed.
        plot : bool, optional, default: True
            Display model exploration plot.

        Raises
        ------
        ValueError
            If model_type is neither 'LBM' nor 'SBM'.
        """
        if model_type not in ("LBM", "SBM"):
            # ValueError is more precise than the original bare Exception and
            # stays backward compatible (callers catching Exception still match).
            raise ValueError("model_type parameter must be 'SBM' or 'LBM'")
        self._model_type = model_type
        # GPU use is only honoured when cupy imported successfully at module load.
        self._use_gpu = use_gpu if (use_gpu and _CUPY_INSTALLED) else False
        self._gpu_index = gpu_index
        self._plot = plot
        # (figure, axes) tuple reused by the exploration plot; None when plotting is off.
        self._figure = plt.subplots(1) if plot else None
        self.model_explored = None
        self.n_clusters_max = n_clusters_max
@property
def selected_model(self) -> Union[LBM, SBM]:
    """sparsebm.LBM or sparsebm.SBM: the explored model with the highest ICL so far."""
    assert self.model_explored, "Model selection not trained. Use fit()"
    candidates = (entry["model"] for entry in self.model_explored.values())
    return max(candidates, key=lambda candidate: candidate.get_ICL())
def fit(
    self,
    graph: Union[spmatrix, np.ndarray],
    symmetric: Optional[bool] = False,
) -> Union[LBM, SBM]:
    """Perform model selection of the co-clustering.

    Parameters
    ----------
    graph : numpy.ndarray or scipy.sparse.spmatrix, shape=(n_samples, n_features) for the LBM or (n_samples, n_samples) for the SBM
        Matrix to be analyzed.
    symmetric : bool, optional, default: False
        In case of SBM model, specify if the graph connections are symmetric.

    Returns
    -------
    sparsebm.LBM or sparsebm.SBM
        The best trained model according to the ICL.
    """
    # Bug fix: the original compared graph.shape[0] with itself, so the
    # check never fired; an SBM adjacency matrix must be square.
    if self._model_type == "SBM" and graph.shape[0] != graph.shape[1]:
        raise Exception(
            "For SBM, graph shapes must be equals (n_samples, n_samples)."
        )
    self._symmetric = symmetric
    self.graph = graph
    # 2 x nnz array of the coordinates of the non-zero entries.
    self._indices_ones = np.asarray(list(graph.nonzero()))
    # Row and column degree vectors, used to initialize class splits.
    self._row_col_degrees = (
        np.asarray(graph.sum(1)).squeeze(),
        np.asarray(graph.sum(0)).squeeze(),
    )
    self._X = sp.csr_matrix(graph)
    if self._use_gpu:
        self._X = cupyx.scipy.sparse.csr_matrix(self._X.astype(float))
    # Instantiate and train the first (single-class) model.
    if self._model_type == "LBM":
        model = LBM(
            1,
            1,
            max_iter=5000,
            n_init=1,
            n_init_total_run=1,
            n_iter_early_stop=1,
            verbosity=0,
            use_gpu=self._use_gpu,
            gpu_index=self._gpu_index,
        )
        model.fit(graph)
    else:
        model = SBM(
            1,
            max_iter=5000,
            n_init=1,
            n_init_total_run=1,
            n_iter_early_stop=1,
            verbosity=0,
            use_gpu=self._use_gpu,
            gpu_index=self._gpu_index,
        )
        model.fit(graph, symmetric=symmetric)
    # Total number of classes; the LBM counts row plus column classes.
    nnq = (
        model.n_row_clusters + model.n_column_clusters
        if self._model_type == "LBM"
        else model.n_clusters
    )
    self.model_explored = {
        nnq: {
            "split_explored": False,
            "merge_explored": True,
            "model": model,
            "icl": model.get_ICL(),
        }
    }
    best_icl = [self.selected_model.get_ICL()]
    try:
        # Alternate split/merge passes until every explored model has been
        # expanded in both directions, or the best ICL stops improving.
        while not np.all(
            [
                [m["merge_explored"], m["split_explored"]]
                for m in self.model_explored.values()
            ]
        ):
            logger.info("Spliting")
            self.model_explored = self._explore_strategy(strategy="split")
            logger.info("Merging")
            self.model_explored = self._explore_strategy(strategy="merge")
            best_iter_model = self.selected_model
            best_icl.append(best_iter_model.get_ICL())
            logger.info("Best icl is {:.4f}".format(best_icl[-1]))
            # Stop when two consecutive full passes brought no improvement.
            if len(best_icl) > 3 and best_icl[-3] == best_icl[-1]:
                break
    except KeyboardInterrupt:
        # Allow the user to interrupt the search; the best model found so
        # far is still returned.
        pass
    if self._plot:
        figure, _ = self._figure
        plt.close(figure)
    return self.selected_model
def __repr__(self) -> str:
    """Return a debug representation summarising the selection settings."""
    return f"""ModelSelection(
    graph=<{type(self.graph).__name__} at {hex(id(self.graph))}>,
    model_type={self._model_type},
    use_gpu={self._use_gpu},
    symmetric={self._symmetric},
)"""
def _explore_strategy(self, strategy: str):
    """Perform a splitting or merging strategy.

    The splitting strategy stops when the number of classes is greater
    than min(1.5*number of classes of the best model,
    number of classes of the best model + 10, number of classes max).
    The merging strategy stops when the minimum relevant number of
    classes is reached.

    Parameters
    ----------
    strategy : {'merge', 'split'}
        The type of strategy.

    Returns
    -------
    model_explored: dict of {int: dict}
        All the models explored by the strategy. Keys of model_explored is
        the number of classes. The values are dict containing the model,
        its ICL value, two flags merge_explored and split_explored.
    """
    assert strategy in ["merge", "split"]
    # Getting the first model to explore, different according to the strategy:
    # merging starts from the largest model, splitting from the smallest.
    pv_model = (  # Number of classes, different according to the model LBM/SBM.
        self.model_explored[max(self.model_explored.keys())]
        if strategy == "merge"
        else self.model_explored[min(self.model_explored.keys())]
    )
    # Class count of the best model so far (LBM counts rows + columns).
    nnq_best_model = (
        (
            pv_model["model"].n_row_clusters
            + pv_model["model"].n_column_clusters
        )
        if self._model_type == "LBM"
        else pv_model["model"].n_clusters
    )
    model_explored = {}  # All models explored for the current strategy.
    best_model = pv_model  # Best model of the current strategy.
    models_to_explore = [pv_model]
    while models_to_explore:
        model_flag = models_to_explore.pop(0)
        nnq = (  # Number of classes, different according to the model LBM/SBM.
            model_flag["model"].n_row_clusters
            + model_flag["model"].n_column_clusters
            if self._model_type == "LBM"
            else model_flag["model"].n_clusters
        )
        model_explored[nnq] = model_flag
        if self._plot:
            _plot_merge_split_graph(
                self, model_explored, strategy, best_model
            )
        flag_key = (
            "merge_explored" if strategy == "merge" else "split_explored"
        )
        # Class count of the neighbouring model this strategy would produce.
        classes_key = (nnq - 1) if strategy == "merge" else (nnq + 1)
        if model_flag[flag_key]:
            # Already expanded in a previous pass: reuse the neighbouring
            # model from the previous exploration instead of retraining.
            if classes_key in self.model_explored:
                models_to_explore.append(self.model_explored[classes_key])
                if (
                    self.model_explored[classes_key]["icl"]
                    > best_model["icl"]
                ):
                    best_model = self.model_explored[classes_key]
                    nnq_best_model = (
                        (
                            best_model["model"].n_row_clusters
                            + best_model["model"].n_column_clusters
                        )
                        if self._model_type == "LBM"
                        else best_model["model"].n_clusters
                    )
            logger.info(
                "\t Already explored models from {} classes".format(
                    nnq
                )
            )
            continue
        model_flag[flag_key] = True
        logger.info("\t Explore models from {} classes".format(nnq))
        if self._model_type == "LBM":
            # Explore all models derived from the strategy on the rows.
            r_icl, r_model = self._select_and_train_best_model(
                model_flag["model"], strategy=strategy, type=0  # rows
            )
            # Explore all models derived from the strategy on the columns.
            c_icl, c_model = self._select_and_train_best_model(
                model_flag["model"], strategy=strategy, type=1  # columns
            )
        else:
            r_icl, r_model = self._select_and_train_best_model(
                model_flag["model"], strategy=strategy
            )
            c_icl, c_model = (-np.inf, None)
        best_models = [
            {
                "model": r_model,
                "merge_explored": False,
                "split_explored": False,
                "icl": r_icl,
            },
            {
                "model": c_model,
                "merge_explored": False,
                "split_explored": False,
                "icl": c_icl,
            },
        ]
        # Adding the model from previous strategy.
        if classes_key in self.model_explored:
            best_models = [self.model_explored[classes_key]] + best_models
        best_models.sort(key=lambda x: x["icl"], reverse=True)
        # Drop placeholder candidates (icl == -inf means no valid model).
        best_models = [d for d in best_models if not np.isinf(d["icl"])]
        if best_models:
            bfm = best_models[0]
            nnq_bm = (
                bfm["model"].n_row_clusters
                + bfm["model"].n_column_clusters
                if self._model_type == "LBM"
                else bfm["model"].n_clusters
            )
            if bfm["icl"] > best_model["icl"]:
                best_model = bfm
                nnq_best_model = (
                    (
                        best_model["model"].n_row_clusters
                        + best_model["model"].n_column_clusters
                    )
                    if self._model_type == "LBM"
                    else best_model["model"].n_clusters
                )
            # Keep splitting while below the size budget; otherwise mark
            # the candidate as fully split-explored and record it.
            if strategy == "split" and (
                (nnq_bm)
                < min(
                    1.5 * (nnq_best_model),
                    nnq_best_model + 10,
                    self.n_clusters_max,
                )
                or nnq_bm < 4
            ):
                models_to_explore.append(bfm)
            elif strategy == "split":
                bfm["split_explored"] = True
                model_explored[nnq_bm] = bfm
            # Keep merging while more than 3 classes remain; otherwise mark
            # the candidate as fully merge-explored and record it.
            if strategy == "merge" and (nnq_bm) > 3:
                models_to_explore.append(bfm)
            elif strategy == "merge":
                bfm["merge_explored"] = True
                model_explored[nnq_bm] = bfm
    return model_explored
def _select_and_train_best_model(
    self, model: Union[LBM, SBM], strategy: str, type: int = None
) -> Tuple[float, Union[LBM, SBM]]:
    """Given model and a strategy, perform all possible merges/splits of
    classes and return the best one.

    The algorithm instantiates all merges/splits possible, the five best
    candidates are trained for a few EM steps and the best of them is
    trained until convergence.

    Parameters
    ----------
    model : sparsebm.LBM or sparsebm.SBM
        The model from which all merges/splits are tested.
    strategy : {'merge', 'split'}
        The type of strategy.
    type : int, optional
        0 for rows merging/splitting, 1 for columns merging/splitting
        (LBM only; ignored for SBM).

    Returns
    -------
    tuple of (float, sparsebm.LBM or sparsebm.SBM)
        The higher ICL value and its associated model, from all merges/splits.
        (-np.inf, None) when no valid candidate exists.
    """
    assert strategy in ["merge", "split"]
    if self._model_type == "LBM":
        assert type in [0, 1]
        nb_clusters = (
            model.n_row_clusters if type == 0 else model.n_column_clusters
        )
        # Merging needs at least two classes on the chosen axis.
        if strategy == "merge" and (
            (type == 0 and model.n_row_clusters <= 1)
            or (type == 1 and model.n_column_clusters <= 1)
        ):
            return (-np.inf, None)
    else:
        nb_clusters = model.n_clusters
        if strategy == "merge" and nb_clusters <= 1:
            return (-np.inf, None)
    # Getting all possible models from merge or split.
    # Each helper returns an (icl, model) pair on a copy of the model.
    if strategy == "merge":
        if self._model_type == "LBM":
            models = [
                lbm_merge_group(
                    model.copy(),
                    type=type,
                    idx_group_1=a,
                    idx_group_2=b,
                    indices_ones=self._indices_ones,
                )
                for b in range(nb_clusters)
                for a in range(b)
            ]
        else:
            models = [
                sbm_merge_group(
                    model.copy(),
                    idx_group_1=a,
                    idx_group_2=b,
                    indices_ones=self._indices_ones,
                )
                for b in range(nb_clusters)
                for a in range(b)
            ]
    else:
        if self._model_type == "LBM":
            models = [
                lbm_split_group(
                    model.copy(),
                    self._row_col_degrees,
                    type=type,
                    index=i,
                    indices_ones=self._indices_ones,
                )
                for i in range(nb_clusters)
            ]
        else:
            models = [
                sbm_split_group(
                    model.copy(),
                    self._row_col_degrees[0],
                    index=i,
                    indices_ones=self._indices_ones,
                )
                for i in range(nb_clusters)
            ]
    # Rank candidates by ICL and drop invalid (-inf) ones.
    models.sort(key=lambda x: x[0], reverse=True)
    models = [(ic, m) for ic, m in models if not np.isinf(ic)]
    if not models:
        return (-np.inf, None)
    # Five best models are selected and trained for a few EM steps.
    for ic, m in models[:5]:
        if self._model_type == "LBM":
            m._fit_single(
                self._X,
                self._indices_ones,
                self.graph.shape[0],
                self.graph.shape[1],
                init_params=True,
                in_place=True,
                early_stop=15,
            )
        else:
            m._fit_single(
                self._X,
                self._indices_ones,
                self.graph.shape[0],
                init_params=True,
                in_place=True,
                early_stop=15,
            )
    # Re-rank the short-trained candidates on their updated ICL.
    models = [(m.get_ICL(), m) for _, m in models[:5]]
    models.sort(key=lambda x: x[0], reverse=True)
    # The best model is trained until convergence.
    if self._model_type == "LBM":
        models[0][1]._fit_single(
            self._X,
            self._indices_ones,
            self.graph.shape[0],
            self.graph.shape[1],
            init_params=True,
            in_place=True,
        )
    else:
        models[0][1]._fit_single(
            self._X,
            self._indices_ones,
            self.graph.shape[0],
            init_params=True,
            in_place=True,
        )
    return (models[0][1].get_ICL(), models[0][1])
def _plot_merge_split_graph(
    model_selection, model_explored, strategy, best_model_current_strategy
):
    """Refresh the interactive exploration plot for the current strategy.

    For the LBM the axes are (row groups, column groups); for the SBM they
    are (row groups, ICL). Previous-step models are drawn in grey,
    current-step models in orange and the optimal model as a black star.
    """
    # figure = plt.figure(figsize=(5, 1))
    figure, ax = model_selection._figure
    ax.cla()
    if model_selection._model_type == "LBM":
        # Model currently being expanded: smallest for merge, largest for split.
        currently_explored_model = (
            model_explored[min(model_explored.keys())]["model"]
            if strategy == "merge"
            else model_explored[max(model_explored.keys())]["model"]
        )
        # The two (rows, columns) class counts the next step may produce.
        currently_explored_nqnl = (
            [
                (
                    currently_explored_model.n_row_clusters - 1,
                    currently_explored_model.n_column_clusters,
                ),
                (
                    currently_explored_model.n_row_clusters,
                    currently_explored_model.n_column_clusters - 1,
                ),
            ]
            if strategy == "merge"
            else [
                (
                    currently_explored_model.n_row_clusters + 1,
                    currently_explored_model.n_column_clusters,
                ),
                (
                    currently_explored_model.n_row_clusters,
                    currently_explored_model.n_column_clusters + 1,
                ),
            ]
        )
        nqs = [m["model"].n_row_clusters for m in model_explored.values()]
        nls = [m["model"].n_column_clusters for m in model_explored.values()]
        nqs_prev = [
            m["model"].n_row_clusters
            for m in model_selection.model_explored.values()
        ]
        nls_prev = [
            m["model"].n_column_clusters
            for m in model_selection.model_explored.values()
        ]
        ax.set_xlim((0, max(10, max(nqs), max(nqs_prev))))
        ax.set_ylim((0, max(10, max(nls), max(nls_prev))))
        if strategy == "merge":
            ax.set_title("Merging step")
        else:
            ax.set_title("Spliting step")
        ax.set_ylabel("Number of column groups")
        ax.set_xlabel("Number of row groups")
        ax.grid()
        ax.scatter(
            nqs_prev,
            nls_prev,
            s=100,
            c="grey",
            marker="+",
            label="Models explored during previous step",
        )
        ax.scatter(
            nqs,
            nls,
            s=70,
            c="orange",
            marker="o",
            label="Models explored at current step",
        )
        ax.scatter(
            [model_selection.selected_model.n_row_clusters],
            [model_selection.selected_model.n_column_clusters],
            s=120,
            c="black",
            marker="*",
            label="Current optimal model",
        )
        # Annotate the optimal model with its ICL value.
        ax.annotate(
            str(round(model_selection.selected_model.get_ICL(), 2)),
            xy=(
                model_selection.selected_model.n_row_clusters - 0.5,
                model_selection.selected_model.n_column_clusters + 0.25,
            ),
        )
    else:
        nqs = [m["model"].n_clusters for m in model_explored.values()]
        icls = [m["model"].get_ICL() for m in model_explored.values()]
        nqs_prev = [
            m["model"].n_clusters
            for m in model_selection.model_explored.values()
        ]
        icls_prev = [
            m["model"].get_ICL()
            for m in model_selection.model_explored.values()
        ]
        ax.set_xlim((0, max(10, max(nqs), max(nqs_prev))))
        if strategy == "merge":
            ax.set_title("Merging step")
        else:
            ax.set_title("Spliting step")
        ax.set_ylabel("ICL")
        ax.set_xlabel("Number of row groups")
        ax.grid()
        ax.scatter(
            nqs_prev,
            icls_prev,
            s=100,
            c="grey",
            marker="+",
            label="Models explored during previous step",
        )
        ax.scatter(
            nqs,
            icls,
            s=70,
            c="orange",
            marker="o",
            label="Models explored at current step",
        )
        ax.scatter(
            [model_selection.selected_model.n_clusters],
            [model_selection.selected_model.get_ICL()],
            s=120,
            c="black",
            marker="*",
            label="Current optimal model",
        )
    ax.legend()
    # Brief pause so the GUI event loop can redraw the figure.
    plt.pause(0.01)
|
from enum import Enum
# Compass directions enumerated clockwise, starting at north (values 1-4).
# Built with the Enum functional API; members and values are identical to a
# class-based definition.
Direction = Enum("Direction", ["NORTH", "EAST", "SOUTH", "WEST"])
# coding: utf-8
from sqlalchemy import BigInteger, Column, Date, ForeignKey, Index, Integer, String
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
class Meetingdate(Base):
    """One row per unique calendar meeting date."""
    __tablename__ = 'MeetingDate'
    __table_args__ = (
        Index('MeetingDate_id_date_key', 'id', 'date'),
    )
    id = Column(BigInteger, primary_key=True)
    # Each calendar date may appear only once.
    date = Column(Date, nullable=False, unique=True)
class Token(Base):
    """A token with an occurrence count, optionally tied to a meeting date."""
    __tablename__ = 'Token'
    __table_args__ = (
        Index('Token_token_dateid_key', 'token', 'dateid'),
    )
    tokenid = Column(BigInteger, primary_key=True)
    # Occurrence count; defaults to 0 at the database level.
    # (Redundant Python 2 u'' string prefixes removed; identical in Python 3.)
    count = Column(Integer, nullable=False, server_default='0')
    dateid = Column(ForeignKey('MeetingDate.id'))
    token = Column(String(50))
    MeetingDate = relationship('Meetingdate')
class Tokenlink(Base):
    """A directed link between two tokens on a given date, with position and distance."""
    __tablename__ = 'TokenLinks'
    __table_args__ = (
        Index('TokenLinks_source_target_index_distance_dateid_key', 'source', 'target', 'index', 'distance', 'dateid'),
    )
    dateid = Column(ForeignKey('MeetingDate.id'), nullable=False)
    source = Column(ForeignKey('Token.tokenid'), nullable=False)
    target = Column(ForeignKey('Token.tokenid'), nullable=False)
    distance = Column(Integer, nullable=False)
    index = Column(Integer, nullable=False)
    linkid = Column(BigInteger, primary_key=True)
    # (Redundant Python 2 u'' string prefixes removed; identical in Python 3.)
    MeetingDate = relationship('Meetingdate')
    # Two relationships to Token, disambiguated by explicit join conditions.
    Token = relationship('Token', primaryjoin='Tokenlink.source == Token.tokenid')
    Token1 = relationship('Token', primaryjoin='Tokenlink.target == Token.tokenid')
|
"""
PGD attacks
"""
import torch
import torch.nn.functional as F
from torch.autograd import Variable
# ------------------------------------------------------------------------------
# PGD attack and its variants in the TripleWins paper
# ------------------------------------------------------------------------------
def PGD( \
    x, preds, loss_fn, y=None, model=None, \
    eps=None, steps=3, gamma=None, norm='linf', \
    randinit=False, cuda=False, cnn=False, **kwargs):
    """Projected Gradient Descent attack on the model's main branch.

    x: clean input batch. preds: unused here (kept for a uniform attack
    API — TODO confirm against callers). loss_fn: loss to maximise.
    y: target labels. model: network under attack. eps: perturbation
    budget. steps: number of PGD iterations. gamma: step size.
    norm: one of 'linf', 'l2', 'l1'. randinit: start from a random point
    in the eps-ball. cnn: if True the model returns a single output;
    otherwise a list of branch outputs and the last one is attacked.
    Returns the adversarial batch, clamped to [0, 1].
    """
    # convert to cuda...
    x_adv = x.clone()
    if cuda: x_adv = x_adv.cuda()
    # create an adv. example w. random init (uniform in [-eps, eps])
    if randinit:
        x_rand = torch.rand(x_adv.shape)
        if cuda: x_rand = x_rand.cuda()
        x_adv += (2.0 * x_rand - 1.0) * eps
    x_adv = Variable(x_adv, requires_grad=True)
    # run steps
    for t in range(steps):
        out_adv_branch = model(x_adv) # use the main branch
        if cnn:
            loss_adv = loss_fn(out_adv_branch, y)
        else:
            # multi-branch model: attack the last (main) output
            loss_adv = loss_fn(out_adv_branch[-1], y)
        grad = torch.autograd.grad(loss_adv, x_adv, only_inputs=True)[0]
        # : compute based on the norm
        if 'linf' == norm:
            # gradient-sign ascent step, then project back into the linf ball
            x_adv.data.add_(gamma * torch.sign(grad.data))
            _linfball_projection(x, eps, x_adv, in_place=True)
        elif 'l2' == norm:
            # normalise the gradient per sample before stepping
            x_add = grad.data / grad.data.view(x_adv.shape[0], -1)\
                .norm(2, dim=-1).view(x_adv.shape[0], 1, 1, 1)
            x_adv.data.add_(gamma * x_add)
            x_adv = _l2_projection(x, eps, x_adv)
        elif 'l1' == norm:
            x_add = grad.data / grad.data.view(x_adv.shape[0], -1)\
                .norm(1, dim=-1).view(x_adv.shape[0], 1, 1, 1)
            x_adv.data.add_(gamma * x_add)
            x_adv = _l1_projection(x, eps, x_adv)
        else:
            assert False, ('Error: undefined norm for the attack - {}'.format(norm))
        # keep a valid image in [0, 1]
        x_adv = torch.clamp(x_adv, 0, 1)
    return x_adv
def PGD_avg( \
    x, preds, loss_fn, y=None, model=None, \
    eps=None, steps=3, gamma=None, norm='linf', \
    randinit=False, cuda=False, **kwargs):
    """PGD attack maximising the average loss over all model branches.

    Same parameters as PGD, except the model is assumed to return a list
    of branch outputs and the loss is the uniform average over them.
    Returns the adversarial batch, clamped to [0, 1].
    """
    # convert to cuda...
    x_adv = x.clone()
    if cuda: x_adv = x_adv.cuda()
    # create an adv. example w. random init (uniform in [-eps, eps])
    if randinit:
        x_rand = torch.rand(x_adv.shape)
        if cuda: x_rand = x_rand.cuda()
        x_adv += (2.0 * x_rand - 1.0) * eps
    x_adv = Variable(x_adv, requires_grad=True)
    # run steps
    for t in range(steps):
        out_adv_branch = model(x_adv)
        loss_adv = 0
        # : average the loss over the branches
        for i in range(len(out_adv_branch)):
            loss_adv += loss_fn(out_adv_branch[i], y) * (1.0/len(out_adv_branch))
        grad = torch.autograd.grad(loss_adv, x_adv, only_inputs=True)[0]
        # : bound based on the norm
        if 'linf' == norm:
            # gradient-sign ascent step, then project back into the linf ball
            x_adv.data.add_(gamma * torch.sign(grad.data))
            _linfball_projection(x, eps, x_adv, in_place=True)
        elif 'l2' == norm:
            # normalise the gradient per sample before stepping
            x_add = grad.data / grad.data.view(x_adv.shape[0], -1)\
                .norm(2, dim=-1).view(x_adv.shape[0], 1, 1, 1)
            x_adv.data.add_(gamma * x_add)
            x_adv = _l2_projection(x, eps, x_adv)
        elif 'l1' == norm:
            x_add = grad.data / grad.data.view(x_adv.shape[0], -1)\
                .norm(1, dim=-1).view(x_adv.shape[0], 1, 1, 1)
            x_adv.data.add_(gamma * x_add)
            x_adv = _l1_projection(x, eps, x_adv)
        else:
            assert False, ('Error: undefined norm for the attack - {}'.format(norm))
        # keep a valid image in [0, 1]
        x_adv = torch.clamp(x_adv, 0, 1)
    return x_adv
def PGD_max( \
    x, preds, loss_fn, y=None, model=None, \
    eps=None, steps=3, gamma=None, norm='linf', \
    randinit=False, cuda=False, **kwargs):
    """PGD attack that keeps, per sample, the branch-specific adversarial
    example with the highest average loss over all branches.

    One candidate is optimised per output branch (model.num_output); the
    final adversarial batch gathers, per sample, the candidate with the
    largest summed cross-entropy across branches.
    Returns the adversarial batch (image size hard-coded to 3x32x32, CIFAR-10).
    """
    # convert to cuda...
    x_advs = [x.clone() for _ in range(model.num_output)]
    if cuda: x_advs = [each.cuda() for each in x_advs]
    # create the adv. example w. random init (uniform in [-eps, eps])
    if randinit:
        x_rands = [torch.rand(each.shape) for each in x_advs]
        if cuda: x_rands = [each.cuda() for each in x_rands]
        # Bug fix: the original perturbed each candidate with itself
        # ((2*x_adv - 1) * eps) and never used x_rands; the random offset
        # must come from x_rands, as in PGD and PGD_avg.
        x_advs = [
            each + (2.0 * x_rand - 1.0) * eps
            for each, x_rand in zip(x_advs, x_rands)
        ]
    x_advs = [Variable(each, requires_grad=True) for each in x_advs]
    # run steps
    for t in range(steps):
        for i in range(model.num_output):
            x_adv = x_advs[i]
            out_adv_branch = model(x_adv)
            # candidate i maximises the loss of branch i only
            out = out_adv_branch[i]
            loss_adv = loss_fn(out, y)
            grad = torch.autograd.grad(loss_adv, x_adv, only_inputs=True)[0]
            # : bound based on the norm
            if 'linf' == norm:
                x_adv.data.add_(gamma * torch.sign(grad.data))
                _linfball_projection(x, eps, x_adv, in_place=True)
            elif 'l2' == norm:
                x_add = grad.data / grad.data.view(x_adv.shape[0], -1)\
                    .norm(2, dim=-1).view(x_adv.shape[0], 1, 1, 1)
                x_adv.data.add_(gamma * x_add)
                x_adv = _l2_projection(x, eps, x_adv)
            elif 'l1' == norm:
                x_add = grad.data / grad.data.view(x_adv.shape[0], -1)\
                    .norm(1, dim=-1).view(x_adv.shape[0], 1, 1, 1)
                x_adv.data.add_(gamma * x_add)
                x_adv = _l1_projection(x, eps, x_adv)
            else:
                assert False, ('Error: undefined norm for the attack - {}'.format(norm))
            x_adv = torch.clamp(x_adv, 0, 1)
            x_advs[i] = x_adv
    # record average losses for each adv samples
    losses = []
    for i in range(model.num_output):
        x_adv = x_advs[i]
        out_adv_branch = model(x_adv)
        # : compute the per-sample loss summed over the branches
        for j in range(model.num_output):
            out = out_adv_branch[j]
            if j == 0:
                loss_adv = F.cross_entropy(input=out, target=y, reduce=False)
            else:
                loss_adv += F.cross_entropy(input=out, target=y, reduce=False)
        losses.append(loss_adv)
    # choose the adv. sample by referencing average losses
    losses = torch.stack(losses, dim=-1)
    x_advs = torch.stack(x_advs, dim=1)
    _, idxs = losses.topk(1, dim=-1)
    idxs = idxs.long().view(-1, 1)
    # hard-coded image size - 3 x 32 x 32 for CIFAR-10
    idxs = idxs.unsqueeze(2).unsqueeze(3).unsqueeze(4).repeat(1, 1, 3, 32, 32)
    x_adv = torch.gather(x_advs, 1, idxs).squeeze(1)
    return x_adv
def _tensor_clamp(t, min, max, in_place=True):
if not in_place:
res = t.clone()
else:
res = t
idx = res.data < min
res.data[idx] = min[idx]
idx = res.data > max
res.data[idx] = max[idx]
return res
"""
Norm-based projections (ell-1, ell-2, and ell-inf)
"""
def _l1_projection(x_base, epsilon, x_adv):
delta = x_adv - x_base
# consider the batch run
mask = delta.view(delta.shape[0], -1).norm(1, dim=1) <= epsilon
# compute the scaling factor
scaling_factor = delta.view(delta.shape[0], -1).norm(1, dim=1)
scaling_factor[mask] = epsilon
# scale delta based on the factor
delta *= epsilon / scaling_factor.view(-1, 1, 1, 1)
return (x_base + delta)
def _l2_projection(x_base, epsilon, x_adv):
delta = x_adv - x_base
# consider the batch run
mask = delta.view(delta.shape[0], -1).norm(2, dim=1) <= epsilon
# compute the scaling factor
scaling_factor = delta.view(delta.shape[0], -1).norm(2, dim=1)
scaling_factor[mask] = epsilon
# scale delta based on the factor
delta *= epsilon / scaling_factor.view(-1, 1, 1, 1)
return (x_base + delta)
def _linfball_projection(center, radius, t, in_place=True):
    """Project t onto the L-infinity ball of the given radius around center
    by clamping each element into [center - radius, center + radius]."""
    return _tensor_clamp(t, min=center - radius, max=center + radius, in_place=in_place)
|
# Read a number as text and print each decimal place value.
n = input('numero: ')
# Index from the right so the mapping stays correct even if the user
# types more than four digits (the original assumed exactly four).
print('unidade: ', n[-1])
print('dezena: ', n[-2])
print('centena: ', n[-3])
print('milhar: ', n[-4])
|
def max_subarray_cross(array, left, mid, right):
    """Best sum of a non-empty subarray of array[left..right] that touches
    the mid/mid+1 boundary used by max_subarray's divide step.

    Bug fix: the original initialised both running maxima to 0, so for
    all-negative segments it returned 0 — a sum no non-empty subarray can
    achieve (e.g. [-2, -3] yielded 0 instead of -2). The maxima now start
    at -inf so each side must contribute a real, non-empty sum. The loop
    bounds are also aligned with the caller's split ([left..mid] and
    [mid+1..right]).
    """
    # Best suffix of the left half, ending exactly at index mid.
    inter_sum = 0
    left_sum = float('-inf')
    for i in range(mid, left - 1, -1):
        inter_sum += array[i]
        if inter_sum > left_sum:
            left_sum = inter_sum
    # Best prefix of the right half, starting exactly at index mid+1.
    inter_sum = 0
    right_sum = float('-inf')
    for i in range(mid + 1, right + 1):
        inter_sum += array[i]
        if inter_sum > right_sum:
            right_sum = inter_sum
    # Crossing sum, or either side alone (both are valid subarrays).
    return max(left_sum + right_sum, left_sum, right_sum)
def max_subarray(array, left, right):
    """Maximum subarray sum within array[left..right], divide and conquer."""
    # Base case: a single element is its own best subarray.
    if left == right:
        return array[left]
    mid = (left + right) // 2
    # Best subarray lies wholly left, wholly right, or crosses the middle.
    candidates = (
        max_subarray(array, left, mid),
        max_subarray(array, mid + 1, right),
        max_subarray_cross(array, left, mid, right),
    )
    return max(candidates)
# Driver Code
A = [-2, -3, 4, -1, -2, 1, 5, -3]
# Prints the maximum subarray sum of A.
print(max_subarray(A, 0, len(A) - 1))
|
#!/usr/bin/env python
from distutils.core import setup
from setuptools.command.test import test as TestCommand
import sys
class Tox(TestCommand):
    """`setup.py test` command that delegates to tox."""
    def finalize_options(self):
        # Bug fix: the original called super(Tox, self).finalize_options(self),
        # passing self twice and raising TypeError at runtime.
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True
    def run_tests(self):
        # Bug fix: setuptools invokes run_tests(); the original defined
        # run_test(), so tox was never actually executed.
        import tox
        errno = tox.cmdline(self.test_args)
        sys.exit(errno)
# Package metadata and build configuration.
# NOTE(review): setup comes from distutils.core while Tox subclasses a
# setuptools command; consider importing setup from setuptools instead.
setup(
    name='eHacktivities',
    version='1.0',
    description='eActivities Screen-Scraping API',
    author='Luke Granger-Brown',
    author_email='git@lukegb.com',
    packages=['eactivities'],
    tests_require=['tox'],
    cmdclass = {'test': Tox},
)
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
# This is an auto-generated file. Do not edit it.
"""
Provides Twisted version information.
"""
from twisted.python import versions
# Version object for the twisted.words subproject (major 15, minor 0, micro 0).
version = versions.Version('twisted.words', 15, 0, 0)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import os.path
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
from ab2cb import __version__
class Tox(TestCommand):
    """`setup.py test` command that runs the test suite through tox."""
    user_options = [('tox-args=', 'a', "Arguments to pass to tox")]
    def initialize_options(self):
        TestCommand.initialize_options(self)
        self.tox_args = None
    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True
    def run_tests(self):
        # Imported lazily so tox is only required when tests actually run.
        import tox
        import shlex
        tox_argv = shlex.split(self.tox_args) if self.tox_args else self.tox_args
        errno = tox.cmdline(args=tox_argv)
        sys.exit(errno)
desc = 'ab2cb: convert AdBlock content filters to Safari Content Blockers'
here = os.path.abspath(os.path.dirname(__file__))
# Prefer the full README as the long description, falling back to the
# one-line summary when it is missing (e.g. in an sdist without docs).
try:
    # Fixes: the original used a bare `except:` (which also swallows
    # KeyboardInterrupt/SystemExit) and leaked the file handle.
    with open(os.path.join(here, 'docs/README.rst')) as readme:
        long_description = readme.read()
except OSError:
    long_description = desc
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers = [
    'Development Status :: 3 - Alpha',
    'Environment :: Console',
    'Environment :: Web Environment',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Natural Language :: English',
    'Operating System :: POSIX',
    "Programming Language :: Python :: 2.7",
    "Programming Language :: Python :: 3.4",
    'Topic :: Security',
    'Topic :: Text Processing',
    'Topic :: Utilities',
    'Topic :: Text Processing :: Filters',
]
keywords = ['advertising', 'privacy', 'web']
platforms = ['macosx', 'linux', 'unix']
# Package metadata; the console script `ab2cb` dispatches to ab2cb.ab2cb:run.
setup(
    name='ab2cb',
    version=__version__,
    description=desc,
    long_description=long_description,
    author='Simon Blanchard',
    author_email='bnomis@gmail.com',
    license='MIT',
    url='https://github.com/bnomis/ab2cb',
    classifiers=classifiers,
    keywords=keywords,
    platforms=platforms,
    packages=find_packages(exclude=['tests']),
    entry_points={
        'console_scripts': [
            'ab2cb = ab2cb.ab2cb:run',
        ]
    },
    tests_require=['tox'],
    cmdclass={
        'test': Tox
    },
)
|
"""
Copyright (c) 2020, Tidepool Project
All rights reserved.
"""
import logging
from .issue import JiraIssue
logger = logging.getLogger(__name__)
class JiraFuncRequirement(JiraIssue):
    """A functional-requirement issue whose risks include those of linked stories."""

    @property
    def id(self):
        # Empty string when the reference-id field is unset.
        return self.fields[self.jira.fields['reference_id']] or ''

    @property
    def risks(self):
        # Start from the risks attached directly to this issue
        # (`all` renamed to avoid shadowing the builtin).
        collected = set(super().risks)
        logger.debug(f'direct risks attached to {self.key}: {len(collected)}')
        # aggregate indirect risks from linked stories
        for story in self.stories:
            linked_risks = self.jira.get_issue(story.key, JiraIssue).risks
            logger.debug(f'indirect risks through {story.key}: {len(linked_risks)}')
            collected.update(linked_risks)
        return collected
|
# -*- coding: utf-8 -*-
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import csv
import collections as coll
import typing as ty
import attr
from attr.validators import instance_of as is_a
from attr.validators import optional
from rnacentral_pipeline.databases.data import Entry
# Databases whose TPAs map at chromosome level and therefore need a
# locus tag / standard name in addition to the parent accession.
CHROMOSOME_LEVEL_MAPPINGS = set(
    [
        "WORMBASE",
    ]
)
# ENA source databases for which TPAs are expected (checked by validate()).
DATABASES = {
    "SRPDB",
    "WormBase",
    "lncRNAdb",
    "snOPYdb",
    "tmRNA-Website",
}
# (parent accession, optional locus tag) pair used to match TPAs to entries.
TpaKey = ty.Tuple[str, ty.Optional[str]]
def tpa_key(
    value: ty.Union[Entry, "GenericTpa"], database: ty.Optional[str] = None
) -> TpaKey:
    """
    Generate a key that can be used to map from a GenericTpa to an Entry.
    """
    # An explicit database argument overrides the value's own database.
    db_name = database if database else value.database
    if db_name not in CHROMOSOME_LEVEL_MAPPINGS:
        return (value.parent_accession, None)
    if isinstance(value, Entry):
        # WORMBASE entries key on the standard name, others on the locus tag.
        if db_name == "WORMBASE":
            return (value.parent_accession, value.standard_name)
        return (value.parent_accession, value.locus_tag)
    if isinstance(value, GenericTpa):
        return (value.parent_accession, value.locus_tag)
    return (value.parent_accession, None)
def internal_database_name(ena_name: str) -> str:
    """Map an ENA source name to the internal database name (upper-cased,
    with a couple of special-cased renames)."""
    overrides = {
        "SNOPYDB": "SNOPY",
        "TMRNA-WEBSITE": "TMRNA_WEB",
    }
    name = ena_name.upper()
    return overrides.get(name, name)
@attr.s(frozen=True, slots=True)
class GenericTpa(object):
    """A single third-party annotation (TPA) record parsed from an ENA TSV file."""
    # Internal database name (already normalised, see internal_database_name).
    database = attr.ib(validator=is_a(str))
    database_accession = attr.ib(validator=is_a(str))
    locus_tag = attr.ib(validator=optional(is_a(str)))
    parent_accession = attr.ib(validator=is_a(str))
    parent_secondary = attr.ib(validator=optional(is_a(str)))

    @classmethod
    def from_tsv(cls, row) -> "GenericTpa":
        """Build a GenericTpa from one TSV row, normalising empty strings to None."""
        locus_tag = row["Source secondary accession"]
        if not locus_tag:
            locus_tag = None
        secondary = row["Target secondary accession"]
        if not secondary:
            secondary = None
        database = internal_database_name(row["Source"])
        return cls(
            database,
            row["Source primary accession"],
            locus_tag,
            row["Target primary accession"],
            secondary,
        )

    def accession(self, entry: Entry) -> str:
        """Composite accession: <entry accession>:<database>:<database accession>."""
        return f"{entry.accession}:{self.database}:{self.database_accession}"

    def optional_id(self, entry: Entry) -> ty.Optional[str]:
        """Secondary id from the entry's ENA cross-references, if present."""
        xrefs = entry.xref_data.get("ena_refs", {})
        if self.database in xrefs:
            return xrefs[self.database][1]
        return None

    def transform(self, entry: Entry) -> Entry:
        """Return a copy of the entry re-attributed to this TPA's database."""
        return attr.evolve(
            entry,
            primary_id=self.database_accession,
            optional_id=self.optional_id(entry),
            accession=self.accession(entry),
            database=self.database,
            is_composite="Y",
            non_coding_id=entry.accession,
        )
class UrlBuilder:
    """Builds database-specific URLs and writes them onto entries.

    Each method name matches a lower-cased internal database name so
    transform() can dispatch with getattr.
    """

    def sgd(self, entry) -> str:
        return f"http://www.yeastgenome.org/locus/{entry.primary_id}/overview"

    def srpdb(self, entry: Entry) -> str:
        return f"http://rnp.uthscsa.edu/rnp/SRPDB/rna/sequences/fasta/{entry.primary_id}"

    def wormbase(self, entry: Entry) -> str:
        return f"http://www.wormbase.org/species/c_elegans/gene/{entry.primary_id}"

    def dictybase(self, entry: Entry) -> str:
        return f"http://dictybase.org/gene/{entry.primary_id}"

    def lncrnadb(self, entry: Entry) -> str:
        return f"http://www.lncrnadb.org/Detail.aspx?TKeyID={entry.primary_id}"

    def mirbase(self, entry: Entry) -> str:
        return f"http://www.mirbase.org/cgi-bin/mirna_entry.pl?acc={entry.primary_id}"

    def snopy(self, entry: Entry) -> str:
        return (
            "http://snoopy.med.miyazaki-u.ac.jp/snorna_db.cgi"
            f"?mode=sno_info&id={entry.primary_id}"
        )

    def tmrna_web(self, entry: Entry) -> str:
        return f"http://bioinformatics.sandia.gov/tmrna/seqs/{entry.primary_id}.html"

    def transform(self, entry: Entry) -> Entry:
        """Set entry.url by dispatching on the entry's database name."""
        builder = getattr(self, entry.database.lower())
        return attr.evolve(entry, url=builder(entry))
@attr.s()
class TpaMappings:
    """An index of TPAs, keyed so entries can be matched to them."""
    # Internal database names seen so far.
    databases: set = attr.ib(default=attr.Factory(set))
    # Maps a TpaKey to the set of TPAs sharing that key.
    simple_mapping: ty.Dict[TpaKey, ty.Set[GenericTpa]] = attr.ib(
        default=attr.Factory(lambda: coll.defaultdict(set))
    )
    # Number of TPAs loaded per database (used by validate()).
    counts: ty.Dict[str, int] = attr.ib(default=attr.Factory(coll.Counter))

    def add_tpas(self, tpas: ty.Iterable[GenericTpa]):
        """Index the given TPAs by key and track per-database counts."""
        for tpa in tpas:
            self.simple_mapping[tpa_key(tpa)].add(tpa)
            self.databases.add(tpa.database)
            self.counts[tpa.database] += 1

    def has_tpa_for(self, entry: Entry) -> bool:
        """True when at least one TPA matches the entry."""
        return any(self.find_tpas(entry))

    def find_tpas(self, entry: Entry) -> ty.Iterable[GenericTpa]:
        """Yield TPAs matching the entry, stopping at the first database with hits."""
        for database in self.databases:
            key = tpa_key(entry, database=database)
            tpas = self.simple_mapping.get(key, set())
            for tpa in tpas:
                yield tpa
            if tpas:
                break

    def validate(self) -> bool:
        """Raise ValueError unless every expected database produced TPAs."""
        dbs = [internal_database_name(db) for db in DATABASES]
        failed = [db for db in dbs if not self.counts[db]]
        if failed:
            raise ValueError("No TPAs found for: %s" % ", ".join(failed))
        return True
def parse_tpa_file(handle) -> ty.Iterable[GenericTpa]:
    """Yield a GenericTpa for each sequence-targeted row of a TSV handle."""
    rows = csv.DictReader(handle, delimiter="\t")
    yield from (
        GenericTpa.from_tsv(row) for row in rows if row["Target"] == "sequence"
    )
def load(raw) -> TpaMappings:
    """Build a TpaMappings index from an open TPA TSV handle."""
    mappings = TpaMappings()
    mappings.add_tpas(parse_tpa_file(raw))
    return mappings
def load_file(filename: str) -> TpaMappings:
    """Build a TpaMappings index from a TPA TSV file on disk."""
    with open(filename, "r") as handle:
        return load(handle)
def apply(mapping: TpaMappings, entries: ty.Iterable[Entry]) -> ty.Iterable[Entry]:
    """Yield entries with TPA cross-references applied.

    An entry with at least one matching TPA is expanded into one entry per
    TPA (with the database-specific URL attached); an entry without any
    TPA passes through unchanged.

    Improvement over the original: ``has_tpa_for`` itself scans
    ``find_tpas``, so the old code scanned the mapping twice per entry.
    This version scans once and tracks whether anything was found.
    """
    urls = UrlBuilder()
    for entry in entries:
        found_any = False
        for tpa in mapping.find_tpas(entry):
            found_any = True
            updated = tpa.transform(entry)
            yield urls.transform(updated)
        if not found_any:
            yield entry
|
from rest_framework import serializers
from openpersonen.api.enum import GeslachtsaanduidingChoices, OuderAanduiding
from .datum import DatumSerializer
from .in_onderzoek import OuderInOnderzoekSerializer
from .persoon import PersoonSerializer
class OuderSerializer(PersoonSerializer):
    """Serializer for a parent ("ouder") of a person, extending the base
    person fields with parent-specific optional attributes."""

    # Gender indication, restricted to the enumerated choices.
    geslachtsaanduiding = serializers.ChoiceField(
        choices=GeslachtsaanduidingChoices.choices, required=False
    )
    # Which parent this record denotes — see OuderAanduiding for values.
    ouderAanduiding = serializers.ChoiceField(
        choices=OuderAanduiding.choices, required=False
    )
    # Date the legal family relationship came into effect.
    datumIngangFamilierechtelijkeBetrekking = DatumSerializer(required=False)
    # Fields of this parent currently under investigation ("in onderzoek").
    inOnderzoek = OuderInOnderzoekSerializer(required=False)
|
# Expected parser result: per-pool counters where used + free == total for
# every pool. NOTE(review): presumably memory-pool statistics from a
# device/OS command — confirm units against the parser under test.
expected_output = {
    "processor_pool": {"total": 10147887840, "used": 487331816, "free": 9660556024},
    "reserve_p_pool": {"total": 102404, "used": 88, "free": 102316},
    "lsmi_io_pool": {"total": 6295128, "used": 6294296, "free": 832},
}
|
#Juan Camilo Betancourt Giraldo
#Ingenieria de Sistemas
#Primer Semestre
def aportes(totalsalario, smm):
    """Return the contribution owed on *totalsalario*.

    The rate depends on how many minimum salaries (*smm*) the total
    salary represents: up to 2 -> 1%, up to 6 -> 2%, above 6 -> 2.5%.
    """
    salarios_minimos = totalsalario / smm
    if salarios_minimos <= 2:
        tasa = 0.01
    elif salarios_minimos <= 6:
        tasa = 0.02
    else:
        tasa = 0.025
    return totalsalario * tasa
def recreacion(totalsalario, tiempo):
    """Return the recreation deduction: 0.25% of the salary for up to
    5 years of service (*tiempo*), 0.30% beyond that."""
    tasa = 0.0025 if tiempo <= 5 else 0.0030
    return totalsalario * tasa
def fondo(totalsalario):
    """Ask the user whether to contribute 4% to the solidarity fund and
    return the resulting deduction (0 when the answer is not "si")."""
    print()
    respuesta = input("desea aportar (4%) al fondo de solidaridad? <si-no> ")
    return totalsalario * 0.04 if respuesta == "si" else 0
def pension(totalsalario, tiempo, smm):
    """Return the pension deduction.

    7% after 10+ years of service (*tiempo*); otherwise 8% for salaries
    up to 3 minimum salaries (*smm*) and 10% above that.
    """
    numero_salarios = totalsalario / smm
    if tiempo >= 10:
        return totalsalario * 0.07
    if numero_salarios <= 3:
        return totalsalario * 0.08
    return totalsalario * 0.1
|
import pafy
from core import internals
from core import const
import os
import pprint
# Shared logger for this module.
log = const.log

# Please respect this YouTube token :)
pafy.set_api_key('AIzaSyAnItl3udec-Q1d5bkjKJGL-RgrKO_vU90')
def go_pafy(raw_song, meta_tags=None):
    """Return a pafy track for *raw_song*.

    A YouTube URL is parsed directly; anything else is first resolved to
    a URL via search. Returns None when no video could be found.
    """
    if internals.is_youtube(raw_song):
        return pafy.new(raw_song)
    track_url = generate_youtube_url(raw_song, meta_tags)
    if track_url:
        return pafy.new(track_url)
    return None
def get_youtube_title(content, number=None):
    """Return the video's title, prefixed with "<number>. " when a truthy
    *number* is given."""
    if not number:
        return content.title
    return str(number) + '. ' + content.title
def download_song(file_name, content):
    """Download the matching audio stream for *content* into the
    configured folder; return True on success, False otherwise."""
    _, extension = os.path.splitext(file_name)
    if extension not in ('.webm', '.m4a'):
        log.debug('No audio streams available for {} type'.format(extension))
        return False
    link = content.getbestaudio(preftype=extension[1:])
    if not link:
        log.debug('No audio streams available')
        return False
    log.debug('Downloading from URL: ' + link.url)
    filepath = os.path.join(const.args.folder, file_name)
    log.debug('Saving to: ' + filepath)
    link.download(filepath=filepath)
    return True
def generate_youtube_url(raw_song, meta_tags, tries_remaining=5):
    """ Search for the song on YouTube and generate a URL to its video. """
    # prevents an infinite loop but allows for a few retries
    # NOTE(review): tries_remaining is only checked here, never decremented
    # in this function — presumably a caller retries with a lower value.
    if tries_remaining == 0:
        log.debug('No tries left. I quit.')
        return
    query = { 'part' : 'snippet',
              'maxResults' : 50,
              'type' : 'video' }
    if const.args.music_videos_only:
        # Restrict the search to YouTube's music category.
        query['videoCategoryId'] = '10'
    if not meta_tags:
        song = raw_song
        query['q'] = song
    else:
        # Spotify metadata available: search for "artist - title".
        song = '{0} - {1}'.format(meta_tags['artists'][0]['name'],
                                  meta_tags['name'])
        query['q'] = song
    log.debug('query: {0}'.format(query))
    data = pafy.call_gdata('search', query)
    # Fetch duration/statistics for every hit in one batched videos call.
    query_results = {'part': 'contentDetails,snippet,statistics',
                     'maxResults': 50,
                     'id': ','.join(i['id']['videoId'] for i in data['items'])}
    log.debug('query_results: {0}'.format(query_results))
    vdata = pafy.call_gdata('videos', query_results)
    videos = []
    for x in vdata['items']:
        duration_s = pafy.playlist.parseISO8591(x['contentDetails']['duration'])
        youtubedetails = {'link': x['id'], 'title': x['snippet']['title'],
                          'videotime': internals.videotime_from_seconds(duration_s),
                          'seconds': duration_s}
        videos.append(youtubedetails)
        # Without metadata there is nothing to rank against, so only the
        # first result is kept.
        if not meta_tags:
            break
    if not videos:
        return None
    if const.args.manual:
        log.info(song)
        log.info('0. Skip downloading this song.\n')
        # fetch all video links on first page on YouTube
        for i, v in enumerate(videos):
            log.info(u'{0}. {1} {2} {3}'.format(i+1, v['title'], v['videotime'],
                                                "http://youtube.com/watch?v="+v['link']))
        # let user select the song to download
        result = internals.input_link(videos)
        if not result:
            return None
    else:
        if not meta_tags:
            # if the metadata could not be acquired, take the first result
            # from Youtube because the proper song length is unknown
            result = videos[0]
            log.debug('Since no metadata found on Spotify, going with the first result')
        else:
            # filter out videos that do not have a similar length to the Spotify song
            duration_tolerance = 10
            max_duration_tolerance = 20
            possible_videos_by_duration = list()
            '''
            start with a reasonable duration_tolerance, and increment duration_tolerance
            until one of the Youtube results falls within the correct duration or
            the duration_tolerance has reached the max_duration_tolerance
            '''
            while len(possible_videos_by_duration) == 0:
                possible_videos_by_duration = list(filter(lambda x: abs(x['seconds'] - meta_tags['duration']) <= duration_tolerance, videos))
                duration_tolerance += 1
                if duration_tolerance > max_duration_tolerance:
                    log.error("{0} by {1} was not found.\n".format(meta_tags['name'], meta_tags['artists'][0]['name']))
                    return None
            result = possible_videos_by_duration[0]
    if result:
        url = "http://youtube.com/watch?v=" + result['link']
    else:
        url = None
    return url
|
#!/usr/bin/env python
# coding: utf-8
"""
Tool for automagically updating the paths in visual studio project files.
Is part of the git commit hook, or can be just ran manually.
Takes no arguments, the paths are hardcoded here.
Tested to work with both python 2.7.9 and 3.4.3
(Python's default XML libraries are so horrible this just parses files as lines)
"""
from __future__ import unicode_literals
import fnmatch
import os
import io
# Project file whose source-file lists are regenerated in place.
vcxproj_name = 'teippi.vcxproj'
# Matching filters file (assigns each file to a Solution Explorer folder).
filters_name = 'Teippi.vcxproj.filters'
def matching_files(pattern):
    """Return sorted paths under src/ whose file name matches *pattern*."""
    matches = []
    for root, _, filenames in os.walk('src'):
        for name in fnmatch.filter(filenames, pattern):
            matches.append(os.path.join(root, name))
    return sorted(matches)
def is_debug_only(file):
    """True for .cpp files that should only be compiled in Debug builds
    (console code and test/scconsole sources)."""
    return file.endswith('.cpp') and (
        'src\\console\\' in file
        or 'scconsole.cpp' in file
        or 'test_game.cpp' in file
    )
def _source_groups():
    """Collect (files, xml element tag, filter root folder) per file type."""
    return [(matching_files('*.cpp'), 'ClCompile', 'Source Files'),
            (matching_files('*.h'), 'ClInclude', 'Header Files'),
            (matching_files('*.hpp'), 'ClInclude', 'Source Files'),
            (matching_files('*.txt'), 'Text', 'Source Files'),
            (matching_files('*.py'), 'None', 'Source Files')]


def _format_vcxproj_entries(source):
    """Render the file entries written into the project file; debug-only
    files get an ExcludedFromBuild condition for Release|Win32."""
    out = ''
    for src in source:
        for path in src[0]:
            if is_debug_only(path):
                out += ' <{} Include="{}">\n'.format(src[1], path)
                out += ' <ExcludedFromBuild Condition='
                out += '"\'$(Configuration)|$(Platform)\'==\'Release|Win32\'">true'
                out += '</ExcludedFromBuild>\n'
                out += ' </{}>\n'.format(src[1])
            else:
                out += ' <{} Include="{}" />\n'.format(src[1], path)
    return out


def _rewrite_vcxproj(source):
    """Rewrite the vcxproj: drop existing src entries and emit the fresh
    list into the first unlabeled ItemGroup."""
    out = ''
    written_sources = False
    skip_next_line = False
    current_group_label = 'something'
    with io.open(vcxproj_name, encoding='utf-8') as vcxproj:
        for line in vcxproj:
            if '<ItemGroup' in line:
                # Only unlabeled groups hold source entries.
                current_group_label = 'something' if 'Label=' in line else None
            if skip_next_line:
                skip_next_line = False
                continue
            # Drop stale source entries (and their exclusion lines).
            if 'src' in line and 'Include' in line:
                continue
            if '<ExcludedFromBuild' in line:
                skip_next_line = True
                continue
            if '</ItemGroup>' in line and not written_sources and current_group_label is None:
                written_sources = True
                out += _format_vcxproj_entries(source)
            out += line
    # Fix over original: the output handle is now closed deterministically.
    with open(vcxproj_name, 'wb') as out_file:
        out_file.write(out.encode('utf-8'))


def _rewrite_filters(source):
    """Rewrite the filters file: replace the entries of the second
    ItemGroup with fresh ones mapped to Solution Explorer folders."""
    out = ''
    skip_count = 0
    group_num = 0
    with io.open(filters_name, encoding='utf-8') as filters:
        for line in filters:
            if skip_count != 0:
                skip_count -= 1
                continue
            # Each stale entry spans three lines; skip the next two too.
            if 'src' in line and 'Include' in line:
                skip_count = 2
                continue
            if '</ItemGroup>' in line:
                group_num += 1
            if '</ItemGroup>' in line and group_num == 2:
                for src in source:
                    for path in src[0]:
                        out += ' <{} Include="{}">\n'.format(src[1], path)
                        dirname = os.path.dirname(path)
                        if dirname == 'src':
                            out += ' <Filter>{}</Filter>\n'.format(src[2])
                        else:
                            # dirname[4:] strips the leading 'src/' part.
                            out += ' <Filter>{}\\{}</Filter>\n'.format(src[2], dirname[4:])
                        out += ' </{}>\n'.format(src[1])
            out += line
    with open(filters_name, 'wb') as out_file:
        out_file.write(out.encode('utf-8'))


def main():
    """Regenerate the source lists in the .vcxproj and .filters files.

    Fixes over the original: both output file handles are closed via
    ``with`` (the original never closed them, relying on interpreter
    shutdown to flush), and the builtin-shadowing local ``file`` is gone.
    """
    source = _source_groups()
    _rewrite_vcxproj(source)
    _rewrite_filters(source)
# Entry point when run as a script (also invoked from the git commit hook).
if __name__ == '__main__':
    main()
|
# Copyright (c) 2019 NTT DATA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import os
import shutil
import sys
from tacker.common import csar_utils
from tacker.conductor import conductor_server
from tacker import context
from tacker.glance_store import store as glance_store
from tacker import objects
from tacker.objects import vnf_package
from tacker.tests.unit.conductor import fakes
from tacker.tests.unit.db.base import SqlTestCase
from tacker.tests import uuidsentinel
class TestConductor(SqlTestCase):
    """Unit tests for the VNF-package operations of
    conductor_server.Conductor (upload, delete, cleanup, init)."""

    def setUp(self):
        super(TestConductor, self).setUp()
        self.context = context.get_admin_context()
        self.conductor = conductor_server.Conductor('host')
        self.vnf_package = self._create_vnf_package()

    def _create_vnf_package(self):
        # Persist a package built from the shared fake data.
        vnfpkgm = vnf_package.VnfPackage(context=self.context,
                                         **fakes.VNF_PACKAGE_DATA)
        vnfpkgm.create()
        return vnfpkgm

    # NOTE: mock.patch.object decorators apply bottom-up, so the mock
    # arguments are listed innermost-first in each test signature.
    @mock.patch.object(conductor_server.Conductor, '_onboard_vnf_package')
    @mock.patch.object(conductor_server, 'revert_upload_vnf_package')
    @mock.patch.object(csar_utils, 'load_csar_data')
    @mock.patch.object(glance_store, 'load_csar')
    def test_upload_vnf_package_content(self, mock_load_csar,
                                        mock_load_csar_data,
                                        mock_revert, mock_onboard):
        mock_load_csar_data.return_value = (mock.ANY, mock.ANY)
        mock_load_csar.return_value = '/var/lib/tacker/5f5d99c6-844a-4c3' \
                                      '1-9e6d-ab21b87dcfff.zip'
        self.conductor.upload_vnf_package_content(
            self.context, self.vnf_package)
        mock_load_csar.assert_called()
        mock_load_csar_data.assert_called()
        mock_onboard.assert_called()

    @mock.patch.object(conductor_server.Conductor, '_onboard_vnf_package')
    @mock.patch.object(glance_store, 'store_csar')
    @mock.patch.object(conductor_server, 'revert_upload_vnf_package')
    @mock.patch.object(csar_utils, 'load_csar_data')
    @mock.patch.object(glance_store, 'load_csar')
    def test_upload_vnf_package_from_uri(self, mock_load_csar,
                                         mock_load_csar_data,
                                         mock_revert, mock_store,
                                         mock_onboard):
        address_information = "http://test.zip"
        mock_load_csar_data.return_value = (mock.ANY, mock.ANY)
        mock_load_csar.return_value = '/var/lib/tacker/5f5d99c6-844a' \
                                      '-4c31-9e6d-ab21b87dcfff.zip'
        mock_store.return_value = 'location', 'size', 'checksum',\
                                  'multihash', 'loc_meta'
        self.conductor.upload_vnf_package_from_uri(self.context,
                                                   self.vnf_package,
                                                   address_information,
                                                   user_name=None,
                                                   password=None)
        mock_load_csar.assert_called()
        mock_load_csar_data.assert_called()
        mock_store.assert_called()
        mock_onboard.assert_called()
        # The package object is updated from store_csar's return values.
        self.assertEqual('multihash', self.vnf_package.hash)
        self.assertEqual('location', self.vnf_package.location_glance_store)

    @mock.patch.object(glance_store, 'delete_csar')
    def test_delete_vnf_package(self, mock_delete_csar):
        # Deletion requires an onboarded package.
        self.vnf_package.__setattr__('onboarding_state', 'ONBOARDED')
        self.conductor.delete_vnf_package(self.context, self.vnf_package)
        mock_delete_csar.assert_called()

    @mock.patch.object(os, 'remove')
    @mock.patch.object(shutil, 'rmtree')
    @mock.patch.object(os.path, 'exists')
    @mock.patch.object(vnf_package.VnfPackagesList, 'get_by_filters')
    def test_run_cleanup_vnf_packages(self, mock_get_by_filter,
                                      mock_exists, mock_rmtree,
                                      mock_remove):
        vnf_package_data = {'algorithm': None, 'hash': None,
                            'location_glance_store': None,
                            'onboarding_state': 'CREATED',
                            'operational_state': 'DISABLED',
                            'tenant_id': uuidsentinel.tenant_id,
                            'usage_state': 'NOT_IN_USE',
                            'user_data': {'abc': 'xyz'}
                            }
        # Create then destroy so cleanup sees a soft-deleted package.
        vnfpkgm = objects.VnfPackage(context=self.context, **vnf_package_data)
        vnfpkgm.create()
        vnfpkgm.destroy(self.context)
        mock_get_by_filter.return_value = [vnfpkgm]
        mock_exists.return_value = True
        conductor_server.Conductor('host')._run_cleanup_vnf_packages(
            self.context)
        mock_get_by_filter.assert_called()
        mock_rmtree.assert_called()
        mock_remove.assert_called()

    @mock.patch.object(sys, 'exit')
    @mock.patch.object(conductor_server.LOG, 'error')
    @mock.patch.object(glance_store, 'initialize_glance_store')
    @mock.patch.object(os.path, 'isdir')
    def test_init_host(self, mock_isdir, mock_initialize_glance_store,
                       mock_log_error, mock_exit):
        # Missing CSAR directory must be logged and exit with status 1.
        mock_isdir.return_value = False
        self.conductor.init_host()
        mock_log_error.assert_called()
        mock_exit.assert_called_with(1)
        self.assertIn("Config option 'vnf_package_csar_path' is not configured"
                      " correctly. VNF package CSAR path directory %s doesn't"
                      " exist", mock_log_error.call_args[0][0])
|
# -*- coding: utf-8 -*-
from xutils.custom_logger import (LogFormatter,
CustomLogger)
from xutils.string_utils import (to_unicode,
combinations)
from xutils.test_runner import (add_parent_path,
TestRunner)
from xutils.decorators import handle_exception
from xutils.config_utils import (find_file,
find_and_parse_config)
from xutils.assert_utils import (py_assert,
py_warning)
from xutils.date_utils import (Date,
Period,
Calendar,
is_tradetime_now,
today_date,
Schedule,
is_within_hour_range,
TimeUnits,
NormalizingType,
DateGeneration,
BizDayConventions,
Months,
Weekdays)
from xutils.misc import valid_dict
from xutils.bar_builder import (BarThread,
BarFrequency,
LiveFeed)
from xutils.job_runner import (SocketJob,
server_setup,
server_watch,
enum_windows_callback,
get_window_info)
from xutils.indicator import dual_thrust
# Public API re-exported by `from xutils import *`.
# NOTE(review): 'version' is listed before it is assigned below; that is
# fine for star-imports because the module is fully executed first.
__all__ = ['version',
           'LogFormatter',
           'CustomLogger',
           'to_unicode',
           'combinations',
           'add_parent_path',
           'TestRunner',
           'handle_exception',
           'find_file',
           'find_and_parse_config',
           'py_assert',
           'py_warning',
           'Date',
           'Period',
           'Calendar',
           'Schedule',
           'today_date',
           'is_within_hour_range',
           'TimeUnits',
           'NormalizingType',
           'DateGeneration',
           'BizDayConventions',
           'Months',
           'Weekdays',
           'valid_dict',
           'BarThread',
           'BarFrequency',
           'LiveFeed',
           'SocketJob',
           'server_watch',
           'server_setup',
           'enum_windows_callback',
           'get_window_info',
           'dual_thrust',
           'is_tradetime_now']

# Package version string.
version = '0.5.7'
|
"""Dynamically generate Buildkite pipeline artifact based on git changes."""
import re
import os
import sys
import json
import fnmatch
import pathlib
import datetime
import functools
import subprocess
import collections
from typing import Dict, List, Tuple, Set, Generator, Callable, NoReturn, Union
import box
import pytz
import jsonschema
from ruamel import yaml
# Tag spec: each element is a plain tag or a tuple of tags that must all match.
TAGS = List[Union[str, Tuple[str]]]
# Box defaults: immutable config boxes with auto-created empty sub-boxes.
BOX_CONFIG = dict(frozen_box=True, default_box=True)
class BuildpipeException(Exception):
    """Base exception for buildpipe pipeline-generation errors."""
    pass
def _listify(arg: Union[None, str, List[str], Tuple[str]]) -> List[Union[str, Tuple[str]]]:
"""Return a list of strings or tuples where argument can be multiple types"""
if arg is None or len(arg) == 0:
return []
elif isinstance(arg, str):
return [arg]
elif isinstance(arg, list) or isinstance(arg, tuple):
return list(arg)
else:
raise ValueError(f"Argument is neither None, string nor list. Found {arg}")
def _get_block(project: box.Box) -> List[Union[str, Tuple[str]]]:
    """Combine the stair names a project blocks on (legacy + current key)."""
    # TODO: remove when block_steps is removed from schema
    blocked = _listify(project.block_stairs)
    blocked += _listify(project.block_steps)
    return blocked
def get_git_branch() -> str:
    """Return the current branch: $BUILDKITE_BRANCH when set, else the
    branch reported by git; exits with -1 when git fails."""
    branch = os.getenv('BUILDKITE_BRANCH')
    if branch:
        return branch
    try:
        proc = subprocess.run(
            ['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
            stdout=subprocess.PIPE, check=True
        )
        branch = proc.stdout.decode('utf-8').strip()
    except Exception as exc:
        print(exc)
        sys.exit(-1)
    return branch
def get_deploy_branch(config: box.Box) -> str:
    """Return the configured deploy branch, defaulting to 'master'."""
    configured = config.deploy.branch
    if configured:
        return configured
    return 'master'
def get_changed_files(branch: str, deploy_branch: str, last_commit_only: bool) -> Set[str]:
    """Return the set of file paths changed on this branch.

    On the deploy branch the files of the latest (possibly merge) commit
    are listed; on any other branch the files changed relative to
    ``origin`` are listed. Exits with -1 if git fails.
    """
    commit = os.getenv('BUILDKITE_COMMIT') or branch
    if branch == deploy_branch:
        command = ['git', 'log', '-m', '-1', '--name-only', '--pretty=format:', commit]
    else:
        if last_commit_only:
            command = ['git', 'log', '-1', '--name-only', '--no-merges', '--pretty=format:', 'origin..HEAD']
        else:
            command = ['git', 'log', '--name-only', '--no-merges', '--pretty=format:', 'origin..HEAD']
    try:
        result = subprocess.run(command, stdout=subprocess.PIPE, check=True)
        changed = result.stdout.decode('utf-8').split('\n')
    except Exception as e:
        print(e)
        sys.exit(-1)
    if branch == deploy_branch:
        try:
            # `git log -m` separates each parent's file list with a blank
            # line; keep only the first block.
            first_merge_break = changed.index('')
            changed = changed[0:first_merge_break]
        except ValueError:
            pass
    return {line for line in changed if line}
def _update_dicts(source: Dict, overrides: Dict) -> Dict:
for key, value in overrides.items():
if isinstance(value, collections.Mapping) and value:
returned = _update_dicts(source.get(key, {}), value)
source[key] = returned
else:
source[key] = overrides[key]
return source
def buildkite_override(step_func: Callable) -> Callable:
    """Decorator: deep-merge the stair's buildkite overrides into every
    step produced by the wrapped generator function."""
    @functools.wraps(step_func)
    def func_wrapper(stair: box.Box, projects: Set[box.Box]) -> List[Dict]:
        merged = []
        for step in step_func(stair, projects):
            merged.append(_update_dicts(step, stair.buildkite.to_dict()))
        return merged
    return func_wrapper
def generate_default_wait_step() -> List[str]:
    """Return the plain Buildkite 'wait' step."""
    return ['wait']
def generate_wait_step(stair: box.Box) -> List[str]:
    """Return a wait step honoring the stair's continue_on_failure flag."""
    if not stair.continue_on_failure:
        return generate_default_wait_step()
    return [{'wait': None, 'continue_on_failure': True}]
def generate_block_step(block: Dict, stair: box.Box, projects: Set[box.Box]) -> List[Dict]:
    """Emit the manual-unblock step if any project blocks on this stair."""
    for project in projects:
        if stair.name in _get_block(project):
            return [block]
    return []
@buildkite_override
def generate_project_steps(stair: box.Box, projects: Set[box.Box]) -> List[Dict]:
    """Generate one Buildkite step per affected project for this stair."""
    steps = []
    for project in projects:
        env = {
            'BUILDPIPE_STAIR_NAME': stair.name,
            'BUILDPIPE_STAIR_SCOPE': stair.scope,
            'BUILDPIPE_PROJECT_NAME': project.name,
            'BUILDPIPE_PROJECT_PATH': project.path,
            # Deprecated environment variable names
            # TODO: remove when cutover to new minor version
            'STAIR_NAME': stair.name,
            'STAIR_SCOPE': stair.scope,
            'PROJECT_NAME': project.name,
            'PROJECT_PATH': project.path,
        }
        # Add other environment variables specific to project
        env.update(project.env or {})
        step = {
            'label': f'{stair.name} {project.name} {stair.emoji or project.emoji or ""}'.strip(),
            'env': env,
        }
        if stair.deploy:
            # Serialize deploys of the same project within a stair.
            step['concurrency'] = 1
            step['concurrency_group'] = f'{stair.name}-{project.name}'
        steps.append(step)
    return steps
@buildkite_override
def generate_stair_steps(stair: box.Box, projects: Set[box.Box]) -> List[Dict]:
    """Generate one step for the stair itself when any project is affected."""
    if not projects:
        return []
    return [{
        'label': f'{stair.name} {stair.emoji or ""}'.strip(),
        'env': {
            'BUILDPIPE_STAIR_NAME': stair.name,
            'BUILDPIPE_STAIR_SCOPE': stair.scope
        }
    }]
def check_project_affected(changed_files: Set[str], project: box.Box) -> bool:
    """True when a changed file lives under the project's path or under
    one of its declared dependency paths ('.' watches everything)."""
    watched = [project.path] + list(project.get('dependencies', []))
    for path in watched:
        if path == '.':
            return True
        prefix = os.path.normpath(path).split('/')
        if any(f.split('/')[:len(prefix)] == prefix for f in changed_files):
            return True
    return False
def get_affected_projects(branch: str, config: box.Box) -> Set[box.Box]:
    """Return the projects whose watched paths intersect the changed files,
    after dropping files matching any configured ignore pattern."""
    deploy_branch = get_deploy_branch(config)
    changed = get_changed_files(branch, deploy_branch, config.last_commit_only)
    ignore_patterns = config.get('ignore', [])
    relevant = {
        f for f in changed
        if not any(fnmatch.fnmatch(f, pattern) for pattern in ignore_patterns)
    }
    return {p for p in config.projects if check_project_affected(relevant, p)}
def iter_stairs(stairs: List[box.Box], can_autodeploy: bool) -> Generator[box.Box, None, None]:
    """Yield the stairs to run, skipping deploy stairs when automatic
    deploys are not currently allowed."""
    for stair in stairs:
        if stair.deploy is True and not can_autodeploy:
            continue
        yield stair
def check_autodeploy(deploy: Dict) -> bool:
    """Return True if an automatic deploy is allowed right now.

    The current hour and ISO weekday (in the configured timezone,
    defaulting to UTC) are matched against the configured regexes; the
    defaults allow every hour 0-23 and every weekday 1-7. Dates listed in
    ``blacklist_dates`` (``MM-DD`` strings) are rejected.
    """
    now = datetime.datetime.now(pytz.timezone(deploy.get('timezone', 'UTC')))
    # re.match only anchors at the start, so e.g. '2[0-3]' also matches '2'.
    check_hours = re.match(deploy.get('allowed_hours_regex', '\\d|1\\d|2[0-3]'), str(now.hour))
    check_days = re.match(deploy.get('allowed_weekdays_regex', '[1-7]'), str(now.isoweekday()))
    blacklist_dates = deploy.get('blacklist_dates')
    check_dates = blacklist_dates is None or now.strftime('%m-%d') not in blacklist_dates
    return all([check_hours, check_days, check_dates])
def validate_config(config: box.Box) -> bool:
    """Validate *config* against the JSON schema bundled with the package.

    Raises:
        BuildpipeException: chained from the underlying jsonschema error.
    """
    schema_path = pathlib.Path(__file__).parent / 'schema.json'
    with schema_path.open() as f_schema:
        schema = json.load(f_schema)
    try:
        # Round-trip through JSON so Box values become plain dict/list types.
        jsonschema.validate(json.loads(config.to_json()), schema)
    except jsonschema.exceptions.ValidationError as e:
        raise BuildpipeException("Invalid schema") from e
    return True
def check_tag_rules(stair_tags: TAGS, project_tags: TAGS, project_skip_tags: TAGS) -> bool:
    """Decide whether a project participates in a stair based on tags.

    A stair without tags admits every project. Otherwise each stair tag
    (or tag tuple) is checked in order: any overlap with the project's
    skip tags rejects immediately; a tag set fully contained in the
    project's tags accepts.
    """
    project_tag_set = set(_listify(project_tags))
    skip_tag_set = set(_listify(project_skip_tags))
    # Stairs that don't have tags allow any project
    if not stair_tags:
        return True
    for stair_tag in stair_tags:
        tag_set = set(_listify(stair_tag))
        # Skip any steps a project wants to skip
        if tag_set & skip_tag_set:
            return False
        if tag_set <= project_tag_set:
            return True
    return False
def iter_stair_projects(stair: box.Box, projects: Set[box.Box]) -> Generator[box.Box, None, None]:
    """Yield the projects that should run in this stair (not skipped by
    name and passing the tag rules)."""
    stair_tags = _listify(stair.tags)
    for project in projects:
        skip_tags = _listify(project.skip) + _listify(project.skip_stairs)
        if stair.name in skip_tags:
            continue
        if check_tag_rules(stair_tags, _listify(project.tags), skip_tags):
            yield project
def check_for_trigger_steps(build_steps):
    """Mutate trigger steps in place: their env must be nested under the
    'build' key rather than at the top level."""
    for step in build_steps:
        if 'trigger' in step:
            step['build'] = {'env': step.pop('env')}
def compile_steps(config: box.Box) -> box.Box:
    """Compile the full pipeline (wait/block plus project- or stair-scoped
    steps) for the stairs affected by the current git changes."""
    validate_config(config)
    branch = get_git_branch()
    projects = get_affected_projects(branch, config)
    can_autodeploy = check_autodeploy(config.deploy.to_dict())
    # The step generator is selected by the stair's scope.
    scope_fn = dict(project=generate_project_steps, stair=generate_stair_steps)
    steps = []
    # Sentinel so the first stair gets a plain (non-continuing) wait step.
    previous_stair = box.Box({'continue_on_failure': False})
    for stair in iter_stairs(config.stairs, can_autodeploy):
        stair_projects = list(iter_stair_projects(stair, projects))
        if stair_projects:
            steps += generate_wait_step(previous_stair)
            steps += generate_block_step(config.block.to_dict(), stair, stair_projects)
            steps += scope_fn[stair.scope](stair, stair_projects)
            check_for_trigger_steps(steps)
            previous_stair = stair
    return box.Box({'steps': steps})
def create_pipeline(infile: str, outfile: str, dry_run: bool = False) -> None:
    """Compile the Buildkite pipeline from *infile* and write it to *outfile*.

    Args:
        infile: Path to the buildpipe YAML configuration.
        outfile: Path the generated pipeline YAML is written to.
        dry_run: When True, compile (and validate) only; write nothing.

    Note:
        The return annotation was ``NoReturn``, which declares a function
        that never returns (e.g. always raises); this function returns
        normally, so ``None`` is the correct annotation.
    """
    config = box.Box.from_yaml(filename=infile, **BOX_CONFIG)
    steps = compile_steps(config)
    if not dry_run:
        steps.to_yaml(filename=outfile, Dumper=yaml.dumper.SafeDumper)
|
# By design, pylint: disable=C0302
import threading
from typing import Any, Callable, Optional, Union, TypeVar, cast, overload
from rx.disposable import Disposable
from rx.concurrency import current_thread_scheduler
from ..observer import AutoDetachObserver
from .. import typing, abc
# Generic type variables used by the typed pipe() overloads below.
A = TypeVar('A')
B = TypeVar('B')
C = TypeVar('C')
D = TypeVar('D')
E = TypeVar('E')
F = TypeVar('F')
G = TypeVar('G')
class Observable(typing.Observable):
"""Observable base class.
Represents a push-style collection and contains all operators as
methods to allow classic Rx chaining of operators."""
def __init__(self, subscribe: Optional[Callable[[typing.Observer, Optional[typing.Scheduler]], typing.Disposable]] = None) -> None:
"""Creates an observable sequence object from the specified
subscription function.
Args:
subscribe: Subscribe method implementation.
"""
self.lock = threading.RLock()
self._subscribe = subscribe
super().__init__()
def _subscribe_core(self, observer: typing.Observer, scheduler: typing.Scheduler = None):
return self._subscribe(observer, scheduler) if self._subscribe else Disposable()
def __await__(self) -> Any:
"""Awaits the given observable.
Returns:
The last item of the observable sequence.
Raises:
TypeError: If key is not of type int or slice
"""
from ..operators.tofuture import _to_future
return iter(self.pipe(_to_future()))
def __add__(self, other):
"""Pythonic version of concat.
Example:
>>> zs = xs + ys
Returns:
self.concat(other)"""
from rx import concat
return concat(self, other)
def __getitem__(self, key):
"""Slices the given observable using Python slice notation. The
arguments to slice is start, stop and step given within
brackets [] and separated with the ':' character. It is
basically a wrapper around the operators skip(), skip_last(),
take(), take_last() and filter().
This marble diagram helps you remember how slices works with
streams. Positive numbers is relative to the start of the
events, while negative numbers are relative to the end (close)
of the stream.
r---e---a---c---t---i---v---e---|
0 1 2 3 4 5 6 7 8
-8 -7 -6 -5 -4 -3 -2 -1 0
Examples:
>>> result = source[1:10]
>>> result = source[1:-2]
>>> result = source[1:-1:2]
Args:
key: Slice object
Returns:
A sliced observable sequence.
"""
if isinstance(key, slice):
start, stop, step = key.start, key.stop, key.step
elif isinstance(key, int):
start, stop, step = key, key + 1, 1
else:
raise TypeError("Invalid argument type.")
from ..operators.slice import _slice
return _slice(start, stop, step)(self)
def __iadd__(self, other):
"""Pythonic use of concat.
Example:
xs += ys
Returns:
rx.concat(self, other)
"""
from rx import concat
return concat(self, other)
def subscribe(self, # pylint: disable=too-many-arguments,arguments-differ
observer: Optional[Union[typing.Observer, typing.OnNext]] = None,
on_error: Optional[typing.OnError] = None,
on_completed: Optional[typing.OnCompleted] = None,
on_next: Optional[typing.OnNext] = None,
*,
scheduler: Optional[typing.Scheduler] = None,
) -> typing.Disposable:
"""Subscribe an observer to the observable sequence.
Examples:
>>> source.subscribe()
>>> source.subscribe(observer)
>>> source.subscribe(observer, scheduler=scheduler)
>>> source.subscribe(on_next)
>>> source.subscribe(on_next, on_error)
>>> source.subscribe(on_next, on_error, on_completed)
>>> source.subscribe(on_next, on_error, on_completed, scheduler=scheduler)
Args:
observer: [Optional] The object that is to receive
notifications. You may subscribe using an observer or
callbacks, not both.
scheduler: [Optional] The default scheduler to use for this
subscription.
Returns:
Disposable object representing an observer's subscription
to the observable sequence.
"""
if observer:
if isinstance(observer, typing.Observer) or hasattr(observer, "on_next"):
on_next = cast(typing.Observer, observer).on_next
on_error = cast(typing.Observer, observer).on_error
on_completed = cast(typing.Observer, observer).on_completed
else:
on_next = observer
return self.subscribe_(on_next, on_error, on_completed, scheduler)
def subscribe_(self,
on_next: Optional[typing.OnNext] = None,
on_error: Optional[typing.OnError] = None,
on_completed: Optional[typing.OnCompleted] = None,
scheduler: Optional[typing.Scheduler] = None
) -> typing.Disposable:
"""Subscribe callbacks to the observable sequence.
Examples:
>>> source.subscribe_(on_next)
>>> source.subscribe_(on_next, on_error)
>>> source.subscribe_(on_next, on_error, on_completed)
Args:
on_next: Action to invoke for each element in the observable
sequence.
on_error: Action to invoke upon exceptional termination of
the observable sequence.
on_completed: Action to invoke upon graceful termination of
the observable sequence.
scheduler: The scheduler to use for this subscription.
Returns:
Disposable object representing an observer's subscription
to the observable sequence.
"""
auto_detach_observer = AutoDetachObserver(on_next, on_error, on_completed)
def fix_subscriber(subscriber):
"""Fixes subscriber to make sure it returns a Disposable instead
of None or a dispose function"""
if not hasattr(subscriber, "dispose"):
subscriber = Disposable(subscriber)
return subscriber
def set_disposable(_: abc.Scheduler = None, __: Any = None):
try:
subscriber = self._subscribe_core(auto_detach_observer, scheduler)
except Exception as ex: # By design. pylint: disable=W0703
if not auto_detach_observer.fail(ex):
raise
else:
auto_detach_observer.subscription = fix_subscriber(subscriber)
# Subscribe needs to set up the trampoline before for subscribing.
# Actually, the first call to Subscribe creates the trampoline so
# that it may assign its disposable before any observer executes
# OnNext over the CurrentThreadScheduler. This enables single-
# threaded cancellation
# https://social.msdn.microsoft.com/Forums/en-US/eb82f593-9684-4e27-
# 97b9-8b8886da5c33/whats-the-rationale-behind-how-currentthreadsche
# dulerschedulerequired-behaves?forum=rx
if current_thread_scheduler.schedule_required():
current_thread_scheduler.schedule(set_disposable)
else:
set_disposable()
# Hide the identity of the auto detach observer
return Disposable(auto_detach_observer.dispose)
@overload
def pipe(self, *operators: Callable[['Observable'], 'Observable']) -> 'Observable': # pylint: disable=no-self-use
"""Compose multiple operators left to right.
Composes zero or more operators into a functional composition.
The operators are composed to right. A composition of zero
operators gives back the original source.
source.pipe() == source
source.pipe(f) == f(source)
source.pipe(g, f) == f(g(source))
source.pipe(h, g, f) == f(g(h(source)))
...
Returns the composed observable.
"""
...
@overload
def pipe(self) -> 'Observable': # pylint: disable=function-redefined, no-self-use
... # pylint: disable=pointless-statement
@overload
def pipe(self, op1: Callable[['Observable'], A]) -> A: # pylint: disable=function-redefined, no-self-use
... # pylint: disable=pointless-statement
@overload
def pipe(self, # pylint: disable=function-redefined, no-self-use
op1: Callable[['Observable'], A],
op2: Callable[[A], B]) -> B:
... # pylint: disable=pointless-statement
@overload
def pipe(self, # pylint: disable=function-redefined, no-self-use
op1: Callable[['Observable'], A],
op2: Callable[[A], B],
op3: Callable[[B], C]) -> C:
... # pylint: disable=pointless-statement
@overload
def pipe(self, # pylint: disable=function-redefined, no-self-use
op1: Callable[['Observable'], A],
op2: Callable[[A], B],
op3: Callable[[B], C],
op4: Callable[[C], D]) -> D:
... # pylint: disable=pointless-statement
@overload
def pipe(self, # pylint: disable=function-redefined, no-self-use, too-many-arguments
op1: Callable[['Observable'], A],
op2: Callable[[A], B],
op3: Callable[[B], C],
op4: Callable[[C], D],
op5: Callable[[D], E]) -> E:
... # pylint: disable=pointless-statement
@overload
def pipe(self, # pylint: disable=function-redefined, no-self-use, too-many-arguments
op1: Callable[['Observable'], A],
op2: Callable[[A], B],
op3: Callable[[B], C],
op4: Callable[[C], D],
op5: Callable[[D], E],
op6: Callable[[E], F]) -> F:
... # pylint: disable=pointless-statement
@overload
def pipe(self, # pylint: disable=function-redefined, no-self-use, too-many-arguments
op1: Callable[['Observable'], A],
op2: Callable[[A], B],
op3: Callable[[B], C],
op4: Callable[[C], D],
op5: Callable[[D], E],
op6: Callable[[E], F],
op7: Callable[[F], G]) -> G:
... # pylint: disable=pointless-statement
# pylint: disable=function-redefined
def pipe(self, *operators: Callable[['Observable'], Any]) -> Any:
    """Compose multiple operators left to right.

    Zero or more operators are folded into one function and applied to
    this source:

        source.pipe() == source
        source.pipe(f) == f(source)
        source.pipe(g, f) == f(g(source))
        source.pipe(h, g, f) == f(g(h(source)))

    Returns the composed observable.
    """
    from ..pipe import pipe as compose
    composition = compose(*operators)
    return composition(self)
def run(self) -> Any:
    """Run source synchronously.

    Subscribes to the observable source, then blocks until the source
    either completes or errors.

    Examples:
        >>> result = run(source)

    Raises:
        SequenceContainsNoElementsError: if observable completes
            (on_completed) without any values being emitted.
        Exception: re-raises whatever error (on_error) occurred.

    Returns:
        The last element emitted from the observable.
    """
    from ..run import run as run_sync
    return run_sync(self)
|
# Generated by Django 2.1.2 on 2018-10-11 19:00
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    # Makes Upload.collection an optional foreign key: nullable, blank-able,
    # and set to NULL (rather than cascading a delete) when the referenced
    # Collection is removed.
    dependencies = [
        ("pyazo_core", "0010_auto_20181011_1854"),
    ]
    operations = [
        migrations.AlterField(
            model_name="upload",
            name="collection",
            field=models.ForeignKey(
                blank=True,
                default=None,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to="pyazo_core.Collection",
            ),
        ),
    ]
|
import random  # NOTE(review): unused in the original script; kept to avoid touching imports

# Lowercase alphabet and the matching ASCII code points.
# Bug fix: the original built ord() of undefined bare names (ord(a), ord(b), ...)
# and misspelled the variable names ("leatters"/"ordleatters").
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
           'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
ordletters = [ord(ch) for ch in letters]

# Characters stripped from the input before analysis.
punctuations = ".,;?()[]{}&_-@%<>:!~1234567890/*+$#^/"

text = str("Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots in a piece of classical Latin literature from 45 BC, making it over 2000 years old. Richard McClintock, a Latin professor at Hampden-Sydney College in Virginia, looked up one of the more obscure Latin words, consectetur, from a Lorem Ipsum passage, and going through the cites of the word in classical literature, discovered the undoubtable source. Lorem Ipsum comes from sections 1.10.32 and 1.10.33 of de Finibus Bonorum et Malorum (The Extremes of Good and Evil) by Cicero, written in 45 BC. This book is a treatise on the theory of ethics, very popular during the Renaissance. The first line of Lorem Ipsum, Lorem ipsum dolor sit amet.., comes from a line in section 1.10.32.The standard chunk of Lorem Ipsum used since the 1500s is reproduced below for those interested. Sections 1.10.32 and 1.10.33 from de Finibus Bonorum et Malorum by Cicero are also reproduced in their exact original form, accompanied by English versions from the 1914 translation by H. Rackham.")

# Text without punctuation.
newt = ""
for char in text:
    if char not in punctuations:
        newt += char

# Character codes of the cleaned text.
codes = []
for char in newt:
    codes.append(ord(char))

# Odd character codes.
# Bug fix: the original iterated values but then indexed with them
# (List1[i]), which tests the wrong elements and can raise IndexError.
odds = []
for value in codes:
    if value % 2 == 1:
        odds.append(value)

def freq(index, letters, text):
    """Return how many times letters[index] occurs in the sequence text."""
    count = 0
    for i in range(len(text)):  # bug fix: original called le() instead of len()
        if text[i] == letters[index]:
            count += 1
    return count

# Histogram: one '*' per occurrence of each letter's (odd) character code.
for k in range(len(letters)):
    count = freq(k, ordletters, odds)  # bug fix: capture the return value
    print(letters[k], ':', count * '*')
print(odds)
|
from config import *
from spotipy.oauth2 import SpotifyClientCredentials
import spotipy
import re
# Client-credentials flow: app-level API access, no user login required.
ccm = SpotifyClientCredentials(client_id=spotify_client_id, client_secret=spotify_client_secret)
sp = spotipy.Spotify(client_credentials_manager=ccm)
def manager():
    """Drive the whole flow: prompt, query Spotify, extract data, then output."""
    start = get_start_data()
    spotify_link, save_method = start[0], start[1]
    answer = SpotifyParser(spotify_link=spotify_link, save_method=save_method).get_api_answer()
    api_answer, spotify_method = answer[0], answer[1]
    data = SpotifyParser(spotify_method=spotify_method, api_answer=api_answer).get_tracks_data()
    tracks_name, tracks_authors = data[0], data[1]
    preview_urls, tracks_urls = data[2], data[3]
    outcome = save_data(save_method=save_method, tracks_name=tracks_name,
                        tracks_authors=tracks_authors, preview_urls=preview_urls,
                        tracks_urls=tracks_urls)
    print()
    print(outcome)
def get_start_data():
    """Prompt for a Spotify link and an output method; return (link, method)."""
    spotify_link = input("Enter the spotify link: ")
    print()
    menu = ("Select the method of processing the result:",
            "1) Output to the terminal",
            "2) Save to file (result.txt)")
    for line in menu:
        print(line)
    save_method = int(input("You have chosen the method: "))
    print()
    return (spotify_link, save_method)
def save_data(save_method=None, tracks_name=None, tracks_authors=None, preview_urls=None, tracks_urls=None):
    """Output the collected track data.

    save_method 1 prints to the terminal, 2 writes result.txt. Returns "done"
    on success, or an error string when the method is unknown.
    """
    if save_method == 1:
        for i, track in enumerate(tracks_name):
            temp_print = "=====================\n\n"
            temp_print += f"Full name: {track} - {tracks_authors[i]}\n"
            temp_print += f"Name: {track}\n"
            temp_print += f"Author: {tracks_authors[i]}\n"
            temp_print += f"Preview: {preview_urls[i]}\n"
            temp_print += f"Track url: {tracks_urls[i]}\n"
            print(temp_print)
        print("=====================")
    elif save_method == 2:
        with open("result.txt", "w", encoding="utf-8") as file:
            for i, track in enumerate(tracks_name):
                temp_print = "=====================\n\n"
                temp_print += f"Full name: {track} - {tracks_authors[i]}\n"
                temp_print += f"Name: {track}\n"
                temp_print += f"Author: {tracks_authors[i]}\n"
                temp_print += f"Preview: {preview_urls[i]}\n"
                temp_print += f"Track url: {tracks_urls[i]}\n\n"
                file.write(temp_print)
            file.write("=====================")
        # Fix: removed redundant file.close() -- the with-block already closes it.
        print()
        print("Write done!")
    else:
        return "no save method found! stopped!"  # typo fix: "stoped" -> "stopped"
    return "done"
class SpotifyParser:
    """Extracts track metadata from Spotify playlist/album/track API answers."""

    def __init__(self, spotify_link=None, api_answer=None, save_method=None,
                 spotify_method=None, spotify_track_id=None, tracks_urls=None) -> None:
        self.spotify_link = spotify_link          # URL the user pasted
        self.api_answer = api_answer              # raw spotipy response dict
        self.save_method = save_method            # 1 = terminal, 2 = file
        self.spotify_method = spotify_method      # "playlist" | "album" | "track"
        self.spotify_track_id = spotify_track_id  # id used by get_preview()
        self.tracks_urls = tracks_urls

    def get_api_answer(self):
        """Query the API based on the link type.

        Returns (api_answer, spotify_method) on success, or an error string
        when the link contains no recognised keyword.
        """
        if "playlist" in self.spotify_link:
            api_answer = sp.playlist_items(self.spotify_link)
            spotify_method = "playlist"
        elif "album" in self.spotify_link:
            api_answer = sp.album_tracks(self.spotify_link)
            spotify_method = "album"
        elif "track" in self.spotify_link:
            api_answer = sp.track(self.spotify_link)
            spotify_method = "track"
        else:
            return "no keywords found in the link"
        return (api_answer, spotify_method)

    def get_tracks_data(self):
        """Return (tracks_name, tracks_authors, preview_urls, tracks_urls).

        Bug fix: the four lists are now initialized once up front; previously
        an unknown spotify_method raised UnboundLocalError at the return.
        """
        preview_urls = []
        tracks_name = []
        tracks_authors = []
        tracks_urls = []
        if self.spotify_method == "playlist":
            for item in self.api_answer.get('items'):
                artists_name = []
                tracks_urls.append(item.get('track').get('external_urls').get('spotify'))
                track_name = item.get('track').get('name')
                for artists in item.get('track').get('artists'):
                    artists_name.append(artists.get('name'))
                preview_urls.append(item.get('track').get('album').get('images')[0].get('url'))
                tracks_name.append(track_name)
                tracks_authors.append(', '.join(artists_name))
        elif self.spotify_method == "album":
            for item in self.api_answer.get('items'):
                artists_name = []
                tracks_urls.append(item.get('external_urls').get('spotify'))
                track_name = item.get('name')
                for artists in item.get('artists'):
                    artists_name.append(artists.get('name'))
                tracks_name.append(track_name)
                tracks_authors.append(', '.join(artists_name))
            # NOTE(review): only ONE preview (for the first album item) is
            # appended, matching the original behavior -- confirm intended.
            preview_urls.append(SpotifyParser(spotify_track_id=self.api_answer.get('items')[0].get('id')).get_preview())
        elif self.spotify_method == "track":
            artists_name = []
            tracks_name.append(self.api_answer.get('name'))
            tracks_urls.append(self.api_answer.get('external_urls').get('spotify'))
            for artists in self.api_answer.get('artists'):
                artists_name.append(artists.get('name'))
            tracks_authors.append(', '.join(artists_name))
            preview_urls.append(self.api_answer.get('album').get('images')[0].get('url'))
        return (tracks_name, tracks_authors, preview_urls, tracks_urls)

    def get_preview(self):
        """Fetch the album-art URL for self.spotify_track_id via the track API."""
        local_method = sp.track(self.spotify_track_id)
        return local_method.get('album').get('images')[0].get('url')
# Script entry point.
if __name__ == "__main__":
    manager()
class Calculator:
    """Basic two-operand arithmetic over the pair given at construction."""

    def __init__(self, num1, num2):
        self.num1 = num1
        self.num2 = num2

    def add(self):
        """Sum of both operands."""
        return self.num1 + self.num2

    def subtract(self):
        # NOTE(review): subtracts num1 FROM num2 (num2 - num1) -- confirm the
        # reversed operand order is intended.
        return self.num2 - self.num1

    def multiply(self):
        """Product of both operands."""
        return self.num1 * self.num2

    def divide(self):
        # NOTE(review): divides num2 BY num1; raises ZeroDivisionError when
        # num1 == 0.
        return self.num2 / self.num1
# Interactive driver: read two integers and print all four operations.
num1 = int(input('Digite o num1: '))
num2 = int(input('Digite o num2: '))
calculator = Calculator(num1, num2)
print("Adição:", calculator.add())
# NOTE(review): subtract()/divide() operate as num2-num1 and num2/num1;
# divide() raises ZeroDivisionError when num1 == 0.
print("Subtração:", calculator.subtract())
print("Mutiplicação:", calculator.multiply())
print("Divisão:", calculator.divide())
|
import asyncio
import logging
import time

import asab
###
L = logging.getLogger(__name__)
###
class SystemManagerService(asab.Service):
    """Manages system status messages within the application."""

    def __init__(self, app):
        super().__init__(app, "eaglestitch.SystemManagerService")
        # True until an error is published via publish_error().
        self.is_running = True

    async def data_subscriber(self):
        """Debug loop that prints a heartbeat once per second."""
        while True:
            print(" ---- @ data_subscriber")
            # Bug fix: time.sleep() blocked the entire asyncio event loop from
            # inside a coroutine; asyncio.sleep() yields control instead.
            await asyncio.sleep(1)

    async def publish_error(self, system_code, system_message):
        """Publish an error on the app pub/sub and mark the service stopped."""
        sys_message_json = {
            "code": system_code,
            "message": system_message
        }
        self.App.PubSub.publish(
            "eaglestitch.SystemPubSub.message!",
            sys_message_json=sys_message_json,
            asynchronously=True,
        )
        self.is_running = False

    async def get_system_status(self):
        """Return False once an error has been published, True otherwise."""
        return self.is_running
|
# Copyright 2014 Midokura SARL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mdts.lib.binding_manager import BindingManager
from mdts.lib.physical_topology_manager import PhysicalTopologyManager
from mdts.lib.virtual_topology_manager import VirtualTopologyManager
from mdts.tests.utils.asserts import async_assert_that
from mdts.tests.utils.asserts import receives
from mdts.tests.utils.asserts import should_NOT_receive
from mdts.tests.utils.asserts import within_sec
from mdts.tests.utils.utils import bindings
from mdts.tests.utils.utils import wait_on_futures
from nose.plugins.attrib import attr
import logging
import random
import time
LOG = logging.getLogger(__name__)
# Physical/virtual topology definitions plus the manager that binds them.
PTM = PhysicalTopologyManager(
    '../topologies/mmm_physical_test_conn_tracking.yaml')
VTM = VirtualTopologyManager(
    '../topologies/mmm_virtual_test_conn_tracking.yaml')
BM = BindingManager(PTM, VTM)
# Two ports of the same bridge bound to interfaces on two different hosts.
binding_multihost = {
    'description': 'spanning across multiple MMs',
    'bindings': [
        {'binding':
         {'device_name': 'bridge-000-001', 'port_id': 2,
          'host_id': 1, 'interface_id': 1}},
        {'binding':
         {'device_name': 'bridge-000-001', 'port_id': 3,
          'host_id': 2, 'interface_id': 2}},
    ]
}
def set_bridge_port_filters(bridge_name, port_id, inbound_filter_name,
                            outbound_filter_name):
    """Attach in/out-bound filter chains (looked up by name) to a bridge port.

    A None/empty name clears the corresponding filter.
    """
    port = VTM.get_device_port(bridge_name, port_id)
    in_chain = VTM.get_chain(inbound_filter_name) if inbound_filter_name else None
    out_chain = VTM.get_chain(outbound_filter_name) if outbound_filter_name else None
    port.set_inbound_filter(in_chain)
    port.set_outbound_filter(out_chain)
    # Give the settings time to propagate before returning.
    time.sleep(5)
def unset_bridge_port_filters(bridge_name, port_id):
    """Clear both the in-bound and out-bound filters on a bridge port."""
    set_bridge_port_filters(bridge_name, port_id, None, None)
def get_random_port_num():
    """Pick a random port from the ephemeral range (49152-65535).

    NOTE: Using a random number may cause test indeterminacy on a rare
    occasion.
    """
    first_ephemeral, last_ephemeral = 49152, 65535
    return random.randint(first_ephemeral, last_ephemeral)
@attr(version="v1.2.0")
@bindings(binding_multihost)
def test_filtering_by_network_address():
    '''
    Title: Tests packets filtering based on network address
    Scenario:
    When: A VM sends UDP packets to another host on the same bridge.
    Then: The UDP packets reaches the receiver.
    Then: Filtering rule chains based on network address (IP address) are set on
    the bridge port that the receiver host is connected to.
    And: The UDP packets from the same sender do NOT reach the receiver.
    '''
    sender = BM.get_iface_for_port('bridge-000-001', 2)
    receiver = BM.get_iface_for_port('bridge-000-001', 3)
    # Reset in/out-bound filters.
    unset_bridge_port_filters('bridge-000-001', 3)
    port_num = get_random_port_num()
    # FIXME: do not use harcoded values!
    # Baseline: with no filters the receiver must see the sender's UDP traffic.
    f1 = async_assert_that(receiver,
                           receives('dst host 172.16.1.2 and udp',
                                    within_sec(5)),
                           'No filtering: receives UDP packets from sender.')
    f2 = sender.send_udp('aa:bb:cc:00:01:02', '172.16.1.2', 41,
                         src_port=port_num, dst_port=port_num)
    wait_on_futures([f1, f2])
    # Set a filtering rule based on network address.
    set_bridge_port_filters('bridge-000-001', 3, 'connection_tracking_nw_in',
                            'connection_tracking_nw_out')
    # The same traffic must now be dropped by the NW-address chain.
    f1 = async_assert_that(receiver, should_NOT_receive(
                           'dst host 172.16.1.2 and udp',
                           within_sec(5)),
                           'Packets are filtered based on IP address.')
    f2 = sender.send_udp('aa:bb:cc:00:01:02', '172.16.1.2', 41,
                         src_port=port_num, dst_port=port_num)
    wait_on_futures([f1, f2])
@attr(version="v1.2.0")
@bindings(binding_multihost)
# NOTE(review): function name has a typo ("addres"); kept as-is because test
# runners/CI may select tests by name.
def test_connection_tracking_by_network_addres():
    '''
    Title: Tests NW address based connection tracking.
    Scenario:
    When: A VM, supposedly inside a FW, sends UDP packets to another host,
    supposedly outside the FS, on the same bridge.
    And: The host outside the FW receives the UDP packets.
    Then: A connection-tracking-based peep hole is established.
    And: The outside host now can send UDP packets to the inside host.
    '''
    outside = BM.get_iface_for_port('bridge-000-001', 2)
    inside = BM.get_iface_for_port('bridge-000-001', 3)
    # Set a filtering rule based on ip address.
    set_bridge_port_filters('bridge-000-001', 3, 'connection_tracking_nw_in',
                            'connection_tracking_nw_out')
    # Send forward packets to set up a connection-tracking based peep hole in
    # the filter.
    port_num = get_random_port_num()
    f1 = async_assert_that(outside,
                           receives('dst host 172.16.1.1 and udp',
                                    within_sec(5)),
                           'Outside host receives forward packets from inside.')
    f2 = inside.send_udp('aa:bb:cc:00:01:01', '172.16.1.1', 41,
                         src_port=port_num, dst_port=port_num)
    wait_on_futures([f1, f2])
    # Verify the peep hole.
    f1 = async_assert_that(inside,
                           receives('dst host 172.16.1.2 and udp',
                                    within_sec(5)),
                           'Outside host can send packets to inside '
                           'via a peep hole.')
    f2 = outside.send_udp('aa:bb:cc:00:01:02', '172.16.1.2', 41,
                          src_port=port_num, dst_port=port_num)
    wait_on_futures([f1, f2])
@attr(version="v1.2.0")
@bindings(binding_multihost)
def test_filtering_by_dl():
    '''
    Title: Tests dl-based packet filtering.
    Scenario:
    When: A VM sends UDP packets to another host on the same bridge.
    Then: The UDP packets reach the receiver without filtering rule chains.
    Then: A filtering rule chain based on mac address is set on the bridge.
    And: UDP packets from the same host do NOT reach the same destination host.
    '''
    outside = BM.get_iface_for_port('bridge-000-001', 2)
    inside = BM.get_iface_for_port('bridge-000-001', 3)
    # Reset an in-bound filter.
    unset_bridge_port_filters('bridge-000-001', 3)
    port_num = get_random_port_num()
    # Baseline: with no filters the packets must get through.
    f1 = async_assert_that(
        inside,
        receives('dst host 172.16.1.2 and udp',
                 within_sec(5)),
        'No filtering: inside receives UDP packets from outside.')
    f2 = outside.send_udp('aa:bb:cc:00:01:02', '172.16.1.2', 41,
                          src_port=port_num, dst_port=port_num)
    wait_on_futures([f1, f2])
    # Set a filtering rule based on mac addresses
    set_bridge_port_filters('bridge-000-001', 3, 'connection_tracking_dl_in',
                            'connection_tracking_dl_out')
    # The same traffic must now be dropped by the MAC-address chain.
    f1 = async_assert_that(inside,
                           should_NOT_receive(
                               'dst host 172.16.1.2 and udp',
                               within_sec(5)),
                           'Packets are filtered based on mac address.')
    f2 = outside.send_udp('aa:bb:cc:00:01:02', '172.16.1.2', 41,
                          src_port=port_num, dst_port=port_num)
    wait_on_futures([f1, f2])
@attr(version="v1.2.0")
@bindings(binding_multihost)
def test_connection_tracking_with_drop_by_dl():
    '''
    Title: Tests dl-based connection tracking.
    Scenario:
    When: A VM inside a FW sends UDP packets to a VM outside.
    And: The outside receives the UDP packets.
    Then: A connection-tracking-based peep hole is established.
    And: The outside now can send UDP packets to the inside.
    '''
    outside = BM.get_iface_for_port('bridge-000-001', 2)
    inside = BM.get_iface_for_port('bridge-000-001', 3)
    # Set a filtering rule based on mac addresses
    set_bridge_port_filters('bridge-000-001', 3, 'connection_tracking_dl_in',
                            'connection_tracking_dl_out')
    # Send forward packets to set up a connection-tracking based peep hole in
    # the filter.
    port_num = get_random_port_num()
    f1 = async_assert_that(outside,
                           receives('dst host 172.16.1.1 and udp',
                                    within_sec(5)),
                           'The outside host receives forward packets '
                           'from the inside.')
    f2 = inside.send_udp('aa:bb:cc:00:01:01', '172.16.1.1', 41,
                         src_port=port_num, dst_port=port_num)
    wait_on_futures([f1, f2])
    # Verify the peep hole.
    f1 = async_assert_that(inside,
                           receives('dst host 172.16.1.2 and udp',
                                    within_sec(5)),
                           'The outside host can now send packets to the inside'
                           'via a peep hole.')
    f2 = outside.send_udp('aa:bb:cc:00:01:02', '172.16.1.2', 41,
                          src_port=port_num, dst_port=port_num)
    wait_on_futures([f1, f2])
|
import os
class ReleaseConstants:
    """Filesystem locations used by the release tooling.

    All paths are relative to the directory the release scripts run from.
    """

    def __init__(self) -> None:
        # Fix: dropped pointless F-string prefixes on literals with no
        # placeholders; values are unchanged.
        self.build_dir = "../build"
        self.release_dir = "../build/releases"
        # Release notes for the version being cut (x.y.z is substituted later).
        self.xxx_release_notes_path = os.path.join(self.build_dir, 'relnotes_x.y.z.md')
        self.release_notes_dir = os.path.join(self.build_dir, 'release_notes')
        self.template_release_notes_path = os.path.join(self.release_notes_dir, 'relnotes_template.md')
release_constants = ReleaseConstants()
|
import unittest
import excalibur
import datetime
class TestTimeConversion(unittest.TestCase):
    """Checks excalibur's timezone conversion helpers."""

    def test_convert_timestamp_between_timezones(self):
        # 18:20:28 UTC on 2019-05-01 is 14:20:28 in New York (EDT, UTC-4).
        source = datetime.datetime(2019, 5, 1, 18, 20, 28)
        converted = excalibur.convert_timestamp_between_timezones(
            source,
            excalibur.get_utc_timezone_name(),
            excalibur.get_ny_timezone_name(),
        )
        expected = datetime.datetime(2019, 5, 1, 14, 20, 28)
        self.assertEqual(converted.replace(tzinfo=None), expected)
|
import argparse
from fmridenoise.interfaces.denoising import Denoise
from fmridenoise.interfaces.smoothing import Smooth
from nipype import Node, Workflow
from fmridenoise.interfaces.confounds import Confounds
from fmridenoise.pipelines import is_IcaAROMA, get_pipeline_path, load_pipeline_from_json
def run(output_dir: str, pipeline_name: str, fmri_file: str, conf_raw: str, conf_json: str):
    """Build and execute a single-subject test denoising workflow.

    Non-AROMA pipelines get a smoothing step feeding the denoiser; AROMA
    pipelines consume the input file directly.
    """
    pipeline = load_pipeline_from_json(get_pipeline_path(pipeline_name))
    wf = Workflow(name="test_workflow", base_dir=output_dir)

    confounds = Node(
        Confounds(
            pipeline=pipeline,
            conf_raw=conf_raw,
            conf_json=conf_json,
            subject="test",
            task="test",
            session="test",
            output_dir=output_dir,
        ),
        name="Confprep",
    )
    denoise = Node(
        Denoise(pipeline=pipeline, task="test", output_dir=output_dir),
        name="Denoise",
    )

    if is_IcaAROMA(pipeline):
        # AROMA output is already smoothed; feed it straight to the denoiser.
        denoise.inputs.fmri_prep_aroma = fmri_file
    else:
        smooth = Node(
            Smooth(fmri_prep=fmri_file, output_directory=output_dir),
            name="Smooth",
        )
        wf.connect([(smooth, denoise, [("fmri_smoothed", "fmri_prep")])])

    wf.connect([(confounds, denoise, [("conf_prep", "conf_prep")])])
    wf.run()
if __name__ == '__main__':
    cli = argparse.ArgumentParser()
    # All five flags are mandatory and share the same shape.
    for short_flag, long_flag in (("-o", "--output_dir"),
                                  ("-p", "--pipeline_name"),
                                  ("-f", "--fmri_file"),
                                  ("-r", "--conf_raw"),
                                  ("-j", "--conf_json")):
        cli.add_argument(short_flag, long_flag, required=True)
    ns = cli.parse_args()
    run(ns.output_dir, ns.pipeline_name, ns.fmri_file, ns.conf_raw, ns.conf_json)
# helper functions
def format_show_info(show):
    """Build a Markdown summary string for a TVMaze-style show object.

    Missing attributes render as an em-dash placeholder; the 0-10 rating is
    shown as a five-star bar.
    """
    empty_placeholder = "—"
    star_emoji = "★"
    empty_star_emoji = "☆"
    text = ("_{name} ({start} - {end})_"
            "\nRating: {rating}"
            "\nGenres: _{genres}_"
            "\nRuntime: _{runtime}_"
            "\nStatus: _{status}_")
    name = getattr(show, "name", None)
    start = getattr(show, "premiered", None)
    end = getattr(getattr(show, "previous_episode", None), "airdate", None)
    rating = getattr(show, "rating", {}).get("average")
    genres = getattr(show, "genres", None)
    runtime = getattr(show, "runtime", None)
    status = getattr(show, "status", None)
    # getattr defaults are not enough: None is an acceptable *stored* value
    # for these attributes, so fall back to placeholders explicitly.
    name = name if name else empty_placeholder
    start = start[:4] if start else ""
    genres = ", ".join(genres) if genres else empty_placeholder
    runtime = str(runtime) + " minutes" if runtime else empty_placeholder
    status = status if status else empty_placeholder
    # Only show the end year if the show has ended.
    # Bug fix: also guard against end being None (no previous episode or no
    # airdate), which previously raised TypeError on the slice.
    if status == "Ended" and end:
        end = end[:4]
    else:
        end = ""
    # Star rating out of five (ratings are on a 0-10 scale).
    if rating:
        stars = int(rating) // 2
        rating = star_emoji * stars + empty_star_emoji * (5 - stars)
    else:
        rating = empty_placeholder
    formatted_text = text.format(
        name=name,
        start=start,
        end=end,
        rating=rating,
        genres=genres,
        runtime=runtime,
        status=status
    )
    return formatted_text
|
from wsbtrading.maths.maths import * |
import pickle
import os
import numpy as np
import json
import argparse
import os.path as op
import csv
def main():
    """Pair captions with audio/2D-video features and pickle train/test splits."""
    np.random.seed(0)  # fixed seed so the split is reproducible
    args = get_args()

    # id -> caption, read from the two-column CSV.
    captions = dict()
    with open(args.text_file, newline='') as rf:
        for row in csv.reader(rf):
            captions[row[0]] = row[1]

    split_ids = train_test_split(list(captions.keys()))
    data_dict = {'train': [], 'test': []}
    for split, id_list in split_ids.items():
        loaded = 0
        for vid in id_list:
            record = {'id': vid, 'caption': captions[vid]}
            audio_path = op.join(args.a_path, vid + '.npy')
            video_path = op.join(args.v_path, vid + '.npy')
            # Features are attached only when both modalities exist on disk;
            # the record is kept either way (matching original behavior).
            if op.exists(audio_path) and op.exists(video_path):
                record['audio'] = np.load(audio_path)
                record['2d'] = np.load(video_path)
                loaded += 1
            data_dict[split].append(record)
        print(f'{split}: total: {len(id_list)} success: {loaded}')

    for split, data in data_dict.items():
        with open(op.join(args.o_path, f'{split}_data.pickle'), 'wb') as wf:
            pickle.dump(data, wf)
def get_args():
    """Parse command-line paths for the caption CSV, feature dirs and output dir."""
    parser = argparse.ArgumentParser()
    specs = (
        ('-t', '--text_file', 'data/text/texts.csv', 'text file path'),
        ('-v', '--v_path', 'features/video/', 'video features dir path'),
        ('-a', '--a_path', 'features/audio/', 'audio features dir path'),
        ('-o', '--o_path', 'pickle_files/', 'output pickle files dir path'),
    )
    for short_flag, long_flag, default, description in specs:
        parser.add_argument(short_flag, long_flag, type=str,
                            default=default, help=description)
    return parser.parse_args()
def train_test_split(ids, train_fraction=0.8):
    """Shuffle ids and split into train/test dicts.

    Fix: operates on a copy, so the caller's list is no longer shuffled in
    place as a side effect. train_fraction generalizes the previously
    hard-coded 80/20 split (default unchanged).
    """
    shuffled = list(ids)
    np.random.shuffle(shuffled)
    num_train = int(train_fraction * len(shuffled))
    return {'train': shuffled[:num_train], 'test': shuffled[num_train:]}
# Script entry point.
if __name__ == '__main__':
    main()
'''
List Mode:
Runtime: 40 ms, faster than 97.87% of Python3 online submissions for Two Sum.
Memory Usage: 14.3 MB, less than 53.95% of Python3 online submissions for Two Sum.
'''
'''
Tuple Mode:
Runtime: 52 ms, faster than 53.96% of Python3 online submissions for Two Sum.
Memory Usage: 14.3 MB, less than 51.63% of Python3 online submissions for Two Sum.
'''
'''
Given an array of integers, return indices of the two numbers such that they add up to a specific target.
You may assume that each input would have exactly one solution, and you may not use the same element twice.
Example:
Given nums = [2, 7, 11, 15], target = 9,
Because nums[0] + nums[1] = 2 + 7 = 9,
return [0, 1].
[3,3]
6
->[0,1]
[3,2,4]
6
->[1,2]
'''
#Dictionary, or 'hashmap' implemented
#Some fast solutions involved sorting the input list. Interesting.
class Solution:
    def twoSum(self, nums: List[int], target: int) -> List[int]:
        """Return indices [i, j] (i < j) with nums[i] + nums[j] == target.

        Single pass with a value->index hash map: O(n) time, O(n) space.
        The problem guarantees exactly one solution; an empty list is
        returned if none exists, keeping the annotated return type total.
        """
        seen = {}  # value -> index of a previously visited element
        # Fix: renamed from 'map' (shadowed the builtin) and replaced the
        # index loop with enumerate.
        for i, num in enumerate(nums):
            complement = target - num
            if complement in seen:
                return [seen[complement], i]
            seen[num] = i
        return []
|
#!/usr/bin/env python
import os
import imp
import sys
import time
import inspect
import cProfile
import argparse
import threading
import traceback
import vivisect
import vivisect.cli as viv_cli
import envi.config as e_config
import envi.threads as e_threads
import vivisect.parsers as viv_parsers
def main():
    """Parse vivbin CLI options, load/analyze workspaces or binaries, then
    either save in bulk mode or launch the Qt GUI."""
    parser = argparse.ArgumentParser(prog='vivbin', usage='%(prog)s [options] <workspace|binaries...>')
    parser.add_argument('-M', '--module', dest='modname', default=None, action='store',
                        help='run the file listed as an analysis module in non-gui mode and exit')
    parser.add_argument('-A', '--skip-analysis', dest='doanalyze', default=True, action='store_false',
                        help='Do *not* do an initial auto-analysis pass')
    parser.add_argument('-B', '--bulk', dest='bulk', default=False, action='store_true',
                        help='Do *not* start the gui, just load, analyze and save')
    parser.add_argument('-C', '--cprofile', dest='cprof', default=False, action='store_true',
                        help='Output vivisect performace profiling (cProfile) info')
    parser.add_argument('-O', '--option', dest='option', default=None, action='store',
                        help='<secname>.<optname>=<optval> (optval must be json syntax)')
    parser.add_argument('-p', '--parser', dest='parsemod', default=None, action='store',
                        help='Manually specify the parser module (pe/elf/blob/...)')
    parser.add_argument('-s', '--storage', dest='storage_name', default=None, action='store',
                        help='Specify a storage module by name')
    parser.add_argument('-v', '--verbose', dest='verbose', default=False, action='store_true',
                        help='Enable verbose mode')
    parser.add_argument('-V', '--version', dest='version', default=None, action='store',
                        help='Add file version (if available) to save file name')
    parser.add_argument('file', nargs='*')
    args = parser.parse_args()
    vw = viv_cli.VivCli()
    vw.verbose = args.verbose
    if args.option is not None:
        if args.option in ('-h', '?'):
            print(vw.config.reprConfigPaths())
            sys.exit(-1)
        try:
            vw.config.parseConfigOption(args.option)
        except e_config.ConfigNoAssignment as e:  # fix: Python-3 'as' syntax
            print(vw.config.reprConfigPaths() + "\n")
            print(e)
            print("syntax: \t-O <secname>.<optname>=<optval> (optval must be json syntax)")
            sys.exit(-1)
        except Exception as e:  # fix: Python-3 'as' syntax
            print(vw.config.reprConfigPaths())
            print("With entry: %s" % args.option)
            print(e)
            sys.exit(-1)
    if args.storage_name is not None:
        vw.setMeta("StorageModule", args.storage_name)
    # If we're not gonna load files, no analyze
    if args.file is None:
        args.doanalyze = False
    # Load in any additional files...
    needanalyze = False
    if args.file is not None:
        for fname in args.file:
            if args.parsemod is None:
                args.parsemod = viv_parsers.guessFormatFilename(fname)
            start = time.time()
            if args.parsemod == 'viv':
                vw.loadWorkspace(fname)
            else:
                needanalyze = True
                vw.loadFromFile(fname, fmtname=args.parsemod)
            end = time.time()
            print('Loaded (%.4f sec) %s' % (end - start, fname))
    if args.bulk:
        if args.doanalyze:
            if args.cprof:
                cProfile.run("vw.analyze()")
            else:
                start = time.time()
                vw.analyze()
                end = time.time()
                # fix: py2 print statement -> print() function
                print("ANALYSIS TIME: %s" % (end - start))
        if args.modname is not None:
            # fix: 'modname' was undefined (use args.modname) and the py2-only
            # file() builtin is replaced by open()
            module = imp.load_module("custom_analysis", open(args.modname, "rb"), args.modname, ('.py', 'U', 1))
            module.analyze(vw)
        print('stats: %r' % (vw.getStats(),))
        print("Saving workspace: %s" % (vw.getMeta('StorageName')))
        vw.saveWorkspace()
    else:
        import vivisect.qt.main as viv_qt_main
        # If we are interactive, lets turn on extended output...
        vw.verbose = True
        if args.doanalyze and needanalyze:
            e_threads.firethread(vw.analyze)()
        viv_qt_main.main(vw)
if __name__ == '__main__':
    try:
        # psyco makes disasm much faster (2-3X)
        # NOTE(review): psyco is Python-2-only; the ImportError is swallowed,
        # so this is a harmless no-op on modern interpreters.
        import psyco
        #psyco.log()
        psyco.full()
    except ImportError:
        pass
    main()
|
from pbpstats.resources.enhanced_pbp import Rebound
from pbpstats.resources.enhanced_pbp.live.enhanced_pbp_item import LiveEnhancedPbpItem
class LiveRebound(Rebound, LiveEnhancedPbpItem):
    """
    Class for rebound events
    """

    action_type = "rebound"

    # Fix: removed the redundant __init__ that only forwarded *args to
    # super(); Python does exactly that by default.

    @property
    def missed_shot(self):
        """
        returns :obj:`~pbpstats.resources.enhanced_pbp.field_goal.FieldGoal` or
        :obj:`~pbpstats.resources.enhanced_pbp.free_throw.FreeThrow` object
        for shot that was missed
        :raises: :obj:`~pbpstats.resources.enhanced_pbp.rebound.EventOrderError`:
            If rebound event is not immediately following a missed shot event.
        """
        # Walk backwards through the event chain until the event whose
        # number matches this rebound's shot_action_number is found.
        prev_event = self.previous_event
        while prev_event is not None:
            if prev_event.event_num == self.shot_action_number:
                return prev_event
            prev_event = prev_event.previous_event
        raise Rebound.EventOrderError(
            f"previous event: {self.previous_event} is not a missed free throw or field goal"
        )

    @property
    def oreb(self):
        """
        returns True if rebound is an offensive rebound, False otherwise
        """
        return self.sub_type == "offensive"

    @property
    def is_placeholder(self):
        """
        returns True if rebound is a placeholder event, False otherwise.
        These are team rebounds on for example missed FT 1 of 2
        """
        # Deadball team rebounds are explicit placeholders.
        if hasattr(self, "qualifiers") and "deadball" in self.qualifiers:
            return True
        # Missed flagrant free throws also produce placeholder rebounds.
        if (
            hasattr(self.missed_shot, "is_flagrant_ft")
            and self.missed_shot.is_flagrant_ft
        ):
            return True
        return False
|
# Endpoint configuration for the public TRON networks.
CONF_MAINNET = {
    "fullnode": "https://api.trongrid.io",
    "event": "https://api.trongrid.io",
}
# The long running, maintained by the tron-us community
CONF_SHASTA = {
    "fullnode": "https://api.shasta.trongrid.io",
    "event": "https://api.shasta.trongrid.io",
    "faucet": "https://www.trongrid.io/faucet",
}
# Maintained by the official team
CONF_NILE = {
    "fullnode": "https://api.nileex.io",
    "event": "https://event.nileex.io",
    "faucet": "http://nileex.io/join/getJoinPage",
}
# Maintained by the official team
CONF_TRONEX = {
    "fullnode": "https://testhttpapi.tronex.io",
    "event": "https://testapi.tronex.io",
    "faucet": "http://testnet.tronex.io/join/getJoinPage",
}

# Registry of all known networks, keyed by short name.
ALL = {
    "mainnet": CONF_MAINNET,
    "nile": CONF_NILE,
    "shasta": CONF_SHASTA,
    "tronex": CONF_TRONEX,
}

def conf_for_name(name: str) -> dict:
    """Return the endpoint config dict for a network name, or None if unknown.

    (The bare ``dict`` annotation understates this: callers must handle None.)
    """
    # Fix: .get() already defaults to None; redundant second argument dropped.
    return ALL.get(name)
|
from telethon import TelegramClient
from telethon.hints import EntityLike
class Wipers:
    """Bulk message-deletion helpers."""

    @staticmethod
    async def wipe(client: TelegramClient, entity: EntityLike, own: bool = True):
        """Delete messages in entity (only our own when own=True); return the count."""
        sender = 'me' if own else None
        ids = []
        async for message in client.iter_messages(entity, from_user=sender):
            ids.append(message.id)
        await client.delete_messages(entity, ids)
        return len(ids)
|
#!/usr/bin/env python
"""
Commands work with servers. (Hiss, boo.)
"""
import copy
import logging
from fabric.api import local, put, settings, require, run, sudo, task
from fabric.state import env
from jinja2 import Template
import app_config
# Module-level logger wired to the project's configured format and level.
logging.basicConfig(format=app_config.LOG_FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(app_config.LOG_LEVEL)
"""
Setup
"""
@task
def setup():
    """
    Setup servers for deployment.
    This does not setup services or push to S3. Run deploy() next.
    """
    require('settings', provided_by=['production', 'staging'])
    require('branch', provided_by=['stable', 'master', 'branch'])
    # Guard: refuse to touch servers unless the project explicitly opted in.
    if not app_config.DEPLOY_TO_SERVERS:
        logger.error('You must set DEPLOY_TO_SERVERS = True in your app_config.py before setting up the servers.')
        return
    # Order matters: credentials and directories before the checkout,
    # requirements only after the repository exists.
    install_google_oauth_creds()
    create_directories()
    create_virtualenv()
    clone_repo()
    checkout_latest()
    install_requirements()
    setup_logs()
def create_directories():
    """
    Create server directories.
    """
    require('settings', provided_by=['production', 'staging'])
    # -p: idempotent, creates intermediate directories as needed.
    run('mkdir -p %(SERVER_PROJECT_PATH)s' % app_config.__dict__)
    # run('mkdir -p /var/www/uploads/%(PROJECT_FILENAME)s' % app_config.__dict__)
def create_virtualenv():
    """
    Setup a server virtualenv.
    """
    require('settings', provided_by=['production', 'staging'])

    run('virtualenv -p %(SERVER_PYTHON)s %(SERVER_VIRTUALENV_PATH)s' % app_config.__dict__)
    # NOTE(review): each run() executes in a fresh shell, so this `source`
    # does not persist to later commands; it effectively only checks that
    # the activate script exists. Confirm whether it can be dropped.
    run('source %(SERVER_VIRTUALENV_PATH)s/bin/activate' % app_config.__dict__)
def clone_repo():
    """
    Clone the source repository.

    If REPOSITORY_ALT_URL is set, also register it as a second remote
    named "bitbucket" inside the freshly cloned repository.
    """
    require('settings', provided_by=['production', 'staging'])

    run('git clone %(REPOSITORY_URL)s %(SERVER_REPOSITORY_PATH)s' % app_config.__dict__)

    if app_config.REPOSITORY_ALT_URL:
        # Bug fix: `git remote add` must run inside the cloned repository.
        # The previous version ran it in the login directory (not a git
        # repo), so the remote was never added. Mirrors the
        # `cd <repo>; git ...` pattern used by checkout_latest().
        run('cd %(SERVER_REPOSITORY_PATH)s; git remote add bitbucket %(REPOSITORY_ALT_URL)s' % app_config.__dict__)
@task
def checkout_latest(remote='origin'):
    """
    Checkout the latest source.

    remote: name of the git remote to fetch/pull from (default "origin").
    """
    require('settings', provided_by=['production', 'staging'])
    require('branch', provided_by=['stable', 'master', 'branch'])

    run('cd %s; git fetch %s' % (app_config.SERVER_REPOSITORY_PATH, remote))
    run('cd %s; git checkout %s; git pull %s %s' % (app_config.SERVER_REPOSITORY_PATH, env.branch, remote, env.branch))
@task
def install_requirements():
    """
    Install the latest requirements.

    Installs both Python dependencies (pip, into the server virtualenv)
    and Node dependencies (npm, inside the repository).
    """
    require('settings', provided_by=['production', 'staging'])

    run('%(SERVER_VIRTUALENV_PATH)s/bin/pip install -U -r %(SERVER_REPOSITORY_PATH)s/requirements.txt' % app_config.__dict__)
    run('cd %(SERVER_REPOSITORY_PATH)s; npm install' % app_config.__dict__)
@task
def setup_logs():
    """
    Create log directories.
    """
    require('settings', provided_by=['production', 'staging'])

    # Robustness fix: plain `mkdir` fails when the directory already
    # exists, breaking re-runs of setup(); use -p for idempotence,
    # consistent with create_directories().
    sudo('mkdir -p %(SERVER_LOG_PATH)s' % app_config.__dict__)
    sudo('chown ubuntu:ubuntu %(SERVER_LOG_PATH)s' % app_config.__dict__)
@task
def install_crontab():
    """
    Install cron jobs script into cron.d.
    """
    require('settings', provided_by=['production', 'staging'])

    # cron.d files are picked up automatically; no service restart needed.
    sudo('cp %(SERVER_REPOSITORY_PATH)s/crontab /etc/cron.d/%(PROJECT_FILENAME)s' % app_config.__dict__)
@task
def uninstall_crontab():
    """
    Remove a previously installed cron jobs script from cron.d.
    """
    require('settings', provided_by=['production', 'staging'])

    sudo('rm /etc/cron.d/%(PROJECT_FILENAME)s' % app_config.__dict__)
@task
def install_google_oauth_creds():
    """
    Install Google Oauth credentials file (global) from workinprivate repo.
    """
    # Clone the private repo over SSH, copy the creds file, then clean up.
    # Requires the server's SSH key to have read access to the repo.
    run('git clone git@github.com:nprapps/workinprivate.git /tmp/workinprivate-tmp')
    run('cp /tmp/workinprivate-tmp/.google_oauth_credentials %s' % app_config.GOOGLE_OAUTH_CREDENTIALS_PATH)
    run('rm -Rf /tmp/workinprivate-tmp')
@task
def remove_google_oauth_creds():
    """
    Remove Google oauth credentials file (global).
    """
    run('rm %s' % app_config.GOOGLE_OAUTH_CREDENTIALS_PATH)
def delete_project():
    """
    Remove the project directory. Invoked by shiva.
    """
    run('rm -rf %(SERVER_PROJECT_PATH)s' % app_config.__dict__)
"""
Configuration
"""
def _get_template_conf_path(service, extension):
"""
Derive the path for a conf template file.
"""
return 'confs/%s.%s' % (service, extension)
def _get_rendered_conf_path(service, extension):
    """
    Derive the rendered path for a conf file,
    e.g. confs/rendered/<project>.nginx.conf.
    """
    return 'confs/rendered/%s.%s.%s' % (app_config.PROJECT_FILENAME, service, extension)
def _get_installed_conf_path(service, remote_path, extension):
    """
    Derive the installed path for a conf file,
    e.g. <remote_path>/<project>.nginx.conf.
    """
    return '%s/%s.%s.%s' % (remote_path, app_config.PROJECT_FILENAME, service, extension)
def _get_installed_service_name(service):
    """
    Derive the init service name for an installed service,
    e.g. <project>.uwsgi.
    """
    return '%s.%s' % (app_config.PROJECT_FILENAME, service)
@task
def render_confs():
    """
    Renders server configurations.

    Each (service, remote_path, extension) triple in SERVER_SERVICES has
    its Jinja template rendered into confs/rendered/.
    """
    require('settings', provided_by=['production', 'staging'])

    with settings(warn_only=True):
        # warn_only: the directory may already exist from a previous run.
        local('mkdir confs/rendered')

    # Copy the app_config so that when we load the secrets they don't
    # get exposed to other management commands
    context = copy.copy(app_config.__dict__)
    context.update(app_config.get_secrets())

    for service, remote_path, extension in app_config.SERVER_SERVICES:
        template_path = _get_template_conf_path(service, extension)
        rendered_path = _get_rendered_conf_path(service, extension)

        with open(template_path, 'r') as read_template:
            with open(rendered_path, 'w') as write_template:
                payload = Template(read_template.read())
                write_template.write(payload.render(**context))
@task
def deploy_confs():
    """
    Deploys rendered server configurations to the specified server.

    This will reload nginx and the appropriate uwsgi config. Files are
    only pushed when their checksum differs from the installed copy.
    """
    require('settings', provided_by=['production', 'staging'])

    render_confs()

    with settings(warn_only=True):
        for service, remote_path, extension in app_config.SERVER_SERVICES:
            rendered_path = _get_rendered_conf_path(service, extension)
            installed_path = _get_installed_conf_path(service, remote_path, extension)

            # Compare local (BSD `md5 -q`) and remote (`md5sum`) checksums.
            a = local('md5 -q %s' % rendered_path, capture=True)
            b = run('md5sum %s' % installed_path).split()[0]

            if a != b:
                # Consistency fix: use the module-level `logger` (configured
                # with app_config's format and level) instead of the root
                # logger, matching setup() and the rest of this file.
                logger.info('Updating %s' % installed_path)
                put(rendered_path, installed_path, use_sudo=True)

                if service == 'nginx':
                    sudo('service nginx reload')
                elif service == 'uwsgi':
                    service_name = _get_installed_service_name(service)
                    sudo('initctl reload-configuration')
                    sudo('service %s restart' % service_name)
                elif service == 'app':
                    # Recreate the uwsgi socket with permissions www-data can use.
                    run('touch %s' % app_config.UWSGI_SOCKET_PATH)
                    sudo('chmod 644 %s' % app_config.UWSGI_SOCKET_PATH)
                    sudo('chown www-data:www-data %s' % app_config.UWSGI_SOCKET_PATH)
            else:
                logger.info('%s has not changed' % rendered_path)
@task
def nuke_confs():
    """
    DESTROYS rendered server configurations from the specified server.

    This will reload nginx and stop the uwsgi config.
    """
    require('settings', provided_by=['production', 'staging'])

    for service, remote_path, extension in app_config.SERVER_SERVICES:
        with settings(warn_only=True):
            # warn_only: missing files or already-stopped services should
            # not abort the teardown of the remaining services.
            installed_path = _get_installed_conf_path(service, remote_path, extension)

            sudo('rm -f %s' % installed_path)

            if service == 'nginx':
                sudo('service nginx reload')
            elif service == 'uwsgi':
                service_name = _get_installed_service_name(service)
                sudo('service %s stop' % service_name)
                sudo('initctl reload-configuration')
            elif service == 'app':
                sudo('rm %s' % app_config.UWSGI_SOCKET_PATH)
@task
def start_service(service):
    """
    Start a service on the server.

    service: short name, expanded via _get_installed_service_name().
    """
    require('settings', provided_by=['production', 'staging'])
    service_name = _get_installed_service_name(service)
    sudo('service %s start' % service_name)
@task
def stop_service(service):
    """
    Stop a service on the server.

    service: short name, expanded via _get_installed_service_name().
    """
    require('settings', provided_by=['production', 'staging'])
    service_name = _get_installed_service_name(service)
    sudo('service %s stop' % service_name)
@task
def restart_service(service):
    """
    Restart a service on the server.

    service: short name, expanded via _get_installed_service_name().
    """
    require('settings', provided_by=['production', 'staging'])
    service_name = _get_installed_service_name(service)
    sudo('service %s restart' % service_name)
"""
Fabcasting
"""
@task
def fabcast(command):
    """
    Actually run specified commands on the server specified
    by staging() or production().
    """
    require('settings', provided_by=['production', 'staging'])

    if not app_config.DEPLOY_TO_SERVERS:
        # Bug fix: previously this only logged and then ran the remote
        # command anyway; now it aborts, consistent with setup(). Also use
        # the configured module `logger` instead of the root logger.
        logger.error('You must set DEPLOY_TO_SERVERS = True in your app_config.py and setup a server before fabcasting.')
        return

    run('cd %s && bash run_on_server.sh fab %s $DEPLOYMENT_TARGET %s' % (app_config.SERVER_REPOSITORY_PATH, env.branch, command))
|
# coding: utf-8
"""
Deep Lynx
The construction of megaprojects has consistently demonstrated challenges for project managers in regard to meeting cost, schedule, and performance requirements. Megaproject construction challenges are common place within megaprojects with many active projects in the United States failing to meet cost and schedule efforts by significant margins. Currently, engineering teams operate in siloed tools and disparate teams where connections across design, procurement, and construction systems are translated manually or over brittle point-to-point integrations. The manual nature of data exchange increases the risk of silent errors in the reactor design, with each silent error cascading across the design. These cascading errors lead to uncontrollable risk during construction, resulting in significant delays and cost overruns. Deep Lynx allows for an integrated platform during design and operations of mega projects. The Deep Lynx Core API delivers a few main features. 1. Provides a set of methods and endpoints for manipulating data in an object oriented database. This allows us to store complex datatypes as records and then to compile them into actual, modifiable objects at run-time. Users can store taxonomies or ontologies in a readable format. 2. Provides methods for storing and retrieving data in a graph database. This data is structured and validated against the aformentioned object oriented database before storage. # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.create_manual_import_response import CreateManualImportResponse # noqa: E501
from swagger_client.rest import ApiException
class TestCreateManualImportResponse(unittest.TestCase):
    """CreateManualImportResponse unit test stubs (swagger-codegen generated)."""

    def setUp(self):
        # No fixtures needed for these generated stubs.
        pass

    def tearDown(self):
        pass

    def testCreateManualImportResponse(self):
        """Test CreateManualImportResponse"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.create_manual_import_response.CreateManualImportResponse()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
|
# Interactive script: prints the cubes of every integer in a user-given range.
print('This program will provide a list of cubes for you. ')

minRange = int(input('What\'s the minimum for your range? '))
maxRange = int(input('What\'s the maximum for your range? '))

# Cube every value in the inclusive range [minRange, maxRange].
listOfCubes = [value ** 3 for value in range(minRange, maxRange + 1)]

print('Your list of cubes in the range(' + str(minRange) + ', ' + str(maxRange) + ') is: ')
print(listOfCubes)
|
# Copyright (c) 2020 vesoft inc. All rights reserved.
#
# This source code is licensed under Apache 2.0 License,
# attached with Common Clause Condition 1.0, found in the LICENSES directory.
#
import os
import sys
import pytest
from pytest_bdd import (
scenarios,
given,
when,
then,
parsers,
)
from nebula2.common.ttypes import Value, NullType
from tests.tck.utils.nbv import register_function, parse
from tests.tck.utils.table import table
# You could register functions that can be invoked from the parsing text
register_function('len', len)

# Collect every feature file under ../features as pytest-bdd scenarios.
scenarios('../features')
@given(parsers.parse("A set of string:\n{text}"),
       target_fixture="string_table")
def string_table(text):
    """Parse the scenario's text block into a table fixture dict."""
    return table(text)
@when('They are parsed as Nebula Value')
def parsed_as_values(string_table):
    """Parse the first column of every row into a Nebula Value."""
    values = []
    column_names = string_table['column_names']
    for row in string_table['rows']:
        cell = row[column_names[0]]
        v = parse(cell)
        assert v is not None, f"Failed to parse `{cell}'"
        values.append(v)
    # Stash the parsed values on the fixture for the `then` step.
    string_table['values'] = values
@then('The type of the parsed value should be as expected')
def parsed_as_expected(string_table):
    """Compare each parsed Value's type name against the expected column."""
    nvalues = string_table['values']
    column_names = string_table['column_names']
    for i, val in enumerate(nvalues):
        # Renamed from `type`: avoid shadowing the builtin.
        vtype = val.getType()
        if vtype == 0:
            actual = 'EMPTY'
        elif vtype == 1:
            null = val.get_nVal()
            if null == 0:
                actual = 'NULL'
            else:
                # Reuse `null` instead of calling get_nVal() a second time.
                actual = NullType._VALUES_TO_NAMES[null]
        else:
            actual = Value.thrift_spec[vtype][2]
        expected = string_table['rows'][i][column_names[1]]
        assert actual == expected, f"expected: {expected}, actual: {actual}"
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 23 10:49:52 2018
@author: marcmueller-stoffels
"""
from bs4 import BeautifulSoup
# Read the setup XML and parse it into soup. A `with` block guarantees the
# file handle is closed even if read() raises (the previous open/close pair
# leaked the handle on error).
with open('../../OutputData/Set0/Setup/Igiugig0Set2Setup.xml', "r") as infile_child:
    contents_child = infile_child.read()

soup = BeautifulSoup(contents_child, 'xml')  # turn into soup
|
# Copyright 2017, Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import mock
import unittest
from google.gax import errors
from google.cloud.gapic.logging.v2 import metrics_service_v2_client
from google.cloud.proto.logging.v2 import logging_metrics_pb2
from google.protobuf import empty_pb2
class CustomException(Exception):
    """Stand-in error raised by mocked gRPC stubs in these tests."""
class TestMetricsServiceV2Client(unittest.TestCase):
    """Unit tests for MetricsServiceV2Client.

    Every test patches google.gax.config.create_stub so no network calls
    happen. Success tests verify both the response returned to the caller
    and the request proto handed to the stub; *_exception tests verify
    that CustomException from the stub surfaces as errors.GaxError.
    """

    @mock.patch('google.gax.config.create_stub', spec=True)
    def test_list_log_metrics(self, mock_create_stub):
        # Mock gRPC layer
        grpc_stub = mock.Mock()
        mock_create_stub.return_value = grpc_stub

        client = metrics_service_v2_client.MetricsServiceV2Client()

        # Mock request
        parent = client.project_path('[PROJECT]')

        # Mock response
        next_page_token = ''
        metrics_element = logging_metrics_pb2.LogMetric()
        metrics = [metrics_element]
        expected_response = logging_metrics_pb2.ListLogMetricsResponse(
            next_page_token=next_page_token, metrics=metrics)
        grpc_stub.ListLogMetrics.return_value = expected_response

        paged_list_response = client.list_log_metrics(parent)
        resources = list(paged_list_response)
        self.assertEqual(1, len(resources))
        self.assertEqual(expected_response.metrics[0], resources[0])

        grpc_stub.ListLogMetrics.assert_called_once()
        args, kwargs = grpc_stub.ListLogMetrics.call_args
        self.assertEqual(len(args), 2)
        self.assertEqual(len(kwargs), 1)
        self.assertIn('metadata', kwargs)
        actual_request = args[0]

        expected_request = logging_metrics_pb2.ListLogMetricsRequest(
            parent=parent)
        self.assertEqual(expected_request, actual_request)

    @mock.patch('google.gax.config.API_ERRORS', (CustomException, ))
    @mock.patch('google.gax.config.create_stub', spec=True)
    def test_list_log_metrics_exception(self, mock_create_stub):
        # Mock gRPC layer
        grpc_stub = mock.Mock()
        mock_create_stub.return_value = grpc_stub

        client = metrics_service_v2_client.MetricsServiceV2Client()

        # Mock request
        parent = client.project_path('[PROJECT]')

        # Mock exception response
        grpc_stub.ListLogMetrics.side_effect = CustomException()

        paged_list_response = client.list_log_metrics(parent)
        # The error is raised lazily, when the page iterator is consumed.
        self.assertRaises(errors.GaxError, list, paged_list_response)

    @mock.patch('google.gax.config.create_stub', spec=True)
    def test_get_log_metric(self, mock_create_stub):
        # Mock gRPC layer
        grpc_stub = mock.Mock()
        mock_create_stub.return_value = grpc_stub

        client = metrics_service_v2_client.MetricsServiceV2Client()

        # Mock request
        metric_name = client.metric_path('[PROJECT]', '[METRIC]')

        # Mock response
        name = 'name3373707'
        description = 'description-1724546052'
        filter_ = 'filter-1274492040'
        expected_response = logging_metrics_pb2.LogMetric(
            name=name, description=description, filter=filter_)
        grpc_stub.GetLogMetric.return_value = expected_response

        response = client.get_log_metric(metric_name)
        self.assertEqual(expected_response, response)

        grpc_stub.GetLogMetric.assert_called_once()
        args, kwargs = grpc_stub.GetLogMetric.call_args
        self.assertEqual(len(args), 2)
        self.assertEqual(len(kwargs), 1)
        self.assertIn('metadata', kwargs)
        actual_request = args[0]

        expected_request = logging_metrics_pb2.GetLogMetricRequest(
            metric_name=metric_name)
        self.assertEqual(expected_request, actual_request)

    @mock.patch('google.gax.config.API_ERRORS', (CustomException, ))
    @mock.patch('google.gax.config.create_stub', spec=True)
    def test_get_log_metric_exception(self, mock_create_stub):
        # Mock gRPC layer
        grpc_stub = mock.Mock()
        mock_create_stub.return_value = grpc_stub

        client = metrics_service_v2_client.MetricsServiceV2Client()

        # Mock request
        metric_name = client.metric_path('[PROJECT]', '[METRIC]')

        # Mock exception response
        grpc_stub.GetLogMetric.side_effect = CustomException()

        self.assertRaises(errors.GaxError, client.get_log_metric, metric_name)

    @mock.patch('google.gax.config.create_stub', spec=True)
    def test_create_log_metric(self, mock_create_stub):
        # Mock gRPC layer
        grpc_stub = mock.Mock()
        mock_create_stub.return_value = grpc_stub

        client = metrics_service_v2_client.MetricsServiceV2Client()

        # Mock request
        parent = client.project_path('[PROJECT]')
        metric = logging_metrics_pb2.LogMetric()

        # Mock response
        name = 'name3373707'
        description = 'description-1724546052'
        filter_ = 'filter-1274492040'
        expected_response = logging_metrics_pb2.LogMetric(
            name=name, description=description, filter=filter_)
        grpc_stub.CreateLogMetric.return_value = expected_response

        response = client.create_log_metric(parent, metric)
        self.assertEqual(expected_response, response)

        grpc_stub.CreateLogMetric.assert_called_once()
        args, kwargs = grpc_stub.CreateLogMetric.call_args
        self.assertEqual(len(args), 2)
        self.assertEqual(len(kwargs), 1)
        self.assertIn('metadata', kwargs)
        actual_request = args[0]

        expected_request = logging_metrics_pb2.CreateLogMetricRequest(
            parent=parent, metric=metric)
        self.assertEqual(expected_request, actual_request)

    @mock.patch('google.gax.config.API_ERRORS', (CustomException, ))
    @mock.patch('google.gax.config.create_stub', spec=True)
    def test_create_log_metric_exception(self, mock_create_stub):
        # Mock gRPC layer
        grpc_stub = mock.Mock()
        mock_create_stub.return_value = grpc_stub

        client = metrics_service_v2_client.MetricsServiceV2Client()

        # Mock request
        parent = client.project_path('[PROJECT]')
        metric = logging_metrics_pb2.LogMetric()

        # Mock exception response
        grpc_stub.CreateLogMetric.side_effect = CustomException()

        self.assertRaises(errors.GaxError, client.create_log_metric, parent,
                          metric)

    @mock.patch('google.gax.config.create_stub', spec=True)
    def test_update_log_metric(self, mock_create_stub):
        # Mock gRPC layer
        grpc_stub = mock.Mock()
        mock_create_stub.return_value = grpc_stub

        client = metrics_service_v2_client.MetricsServiceV2Client()

        # Mock request
        metric_name = client.metric_path('[PROJECT]', '[METRIC]')
        metric = logging_metrics_pb2.LogMetric()

        # Mock response
        name = 'name3373707'
        description = 'description-1724546052'
        filter_ = 'filter-1274492040'
        expected_response = logging_metrics_pb2.LogMetric(
            name=name, description=description, filter=filter_)
        grpc_stub.UpdateLogMetric.return_value = expected_response

        response = client.update_log_metric(metric_name, metric)
        self.assertEqual(expected_response, response)

        grpc_stub.UpdateLogMetric.assert_called_once()
        args, kwargs = grpc_stub.UpdateLogMetric.call_args
        self.assertEqual(len(args), 2)
        self.assertEqual(len(kwargs), 1)
        self.assertIn('metadata', kwargs)
        actual_request = args[0]

        expected_request = logging_metrics_pb2.UpdateLogMetricRequest(
            metric_name=metric_name, metric=metric)
        self.assertEqual(expected_request, actual_request)

    @mock.patch('google.gax.config.API_ERRORS', (CustomException, ))
    @mock.patch('google.gax.config.create_stub', spec=True)
    def test_update_log_metric_exception(self, mock_create_stub):
        # Mock gRPC layer
        grpc_stub = mock.Mock()
        mock_create_stub.return_value = grpc_stub

        client = metrics_service_v2_client.MetricsServiceV2Client()

        # Mock request
        metric_name = client.metric_path('[PROJECT]', '[METRIC]')
        metric = logging_metrics_pb2.LogMetric()

        # Mock exception response
        grpc_stub.UpdateLogMetric.side_effect = CustomException()

        self.assertRaises(errors.GaxError, client.update_log_metric,
                          metric_name, metric)

    @mock.patch('google.gax.config.create_stub', spec=True)
    def test_delete_log_metric(self, mock_create_stub):
        # Mock gRPC layer
        grpc_stub = mock.Mock()
        mock_create_stub.return_value = grpc_stub

        client = metrics_service_v2_client.MetricsServiceV2Client()

        # Mock request
        metric_name = client.metric_path('[PROJECT]', '[METRIC]')

        client.delete_log_metric(metric_name)

        grpc_stub.DeleteLogMetric.assert_called_once()
        args, kwargs = grpc_stub.DeleteLogMetric.call_args
        self.assertEqual(len(args), 2)
        self.assertEqual(len(kwargs), 1)
        self.assertIn('metadata', kwargs)
        actual_request = args[0]

        expected_request = logging_metrics_pb2.DeleteLogMetricRequest(
            metric_name=metric_name)
        self.assertEqual(expected_request, actual_request)

    @mock.patch('google.gax.config.API_ERRORS', (CustomException, ))
    @mock.patch('google.gax.config.create_stub', spec=True)
    def test_delete_log_metric_exception(self, mock_create_stub):
        # Mock gRPC layer
        grpc_stub = mock.Mock()
        mock_create_stub.return_value = grpc_stub

        client = metrics_service_v2_client.MetricsServiceV2Client()

        # Mock request
        metric_name = client.metric_path('[PROJECT]', '[METRIC]')

        # Mock exception response
        grpc_stub.DeleteLogMetric.side_effect = CustomException()

        self.assertRaises(errors.GaxError, client.delete_log_metric,
                          metric_name)
|
import os
import tkinter as tk
from collections import Counter
from tkinter.filedialog import askopenfilename
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import cv2 as cv
import numpy as np
from PIL import Image, ImageTk
from tensorflow.keras import models
from utils import decoder
IMAGE_FORMATS = [("JPEG", "*.jpg"), ("PNG", "*.png")]
SIZE = 512
ZOOM_MULT = 2 ** (1 / 2)
FINAL_SHAPE = 128, 64, 1
SLIDE_STEP = 1.5 / FINAL_SHAPE[0]
RATIO = FINAL_SHAPE[0] / FINAL_SHAPE[1]
MODEL_PATH = 'models/anpr.h5'
def get_possible_label(current, next_chars, labels):
    """Recursively expand candidate characters into full label strings.

    current: prefix built so far.
    next_chars: list of candidate-character lists, one per remaining position.
    labels: accumulator list of complete labels, mutated in place.
    Returns the accumulator.

    Bug fix: the base case (empty next_chars) previously returned None,
    so a direct call with no remaining positions lost the accumulator;
    both branches now return `labels`.
    """
    if next_chars:
        for char in next_chars[0]:
            get_possible_label(current + char, next_chars[1:], labels)
    else:
        labels.append(current)
    return labels
class App:
    """Tkinter GUI for automatic number-plate recognition.

    Loads a Keras model from MODEL_PATH, lets the user open and rotate an
    image, then slides a fixed-aspect window over it at several zoom
    levels, groups overlapping positive detections, and overlays the
    voted label for each group on the image.
    """

    def __init__(self, master):
        self.master = master
        master.title("Automatic Number-Plate Recogintion")
        # Widgets are only built when the model file was found.
        if self.load_model():
            self.image_text = tk.StringVar()
            self.image_name = tk.Label(master, textvariable=self.image_text)
            self.image_name.pack()
            self.image_label = tk.Label(master)
            self.image_label.pack()
            self.prediction_text = tk.StringVar()
            self.prediction_label = tk.Label(master, textvariable=self.prediction_text)
            self.prediction_label.pack()
            # NOTE(review): self.load_button is re-assigned for each button,
            # so only the last one stays referenced. Tk keeps the widgets
            # alive anyway, but the naming is misleading.
            self.load_button = tk.Button(master, text="Load image", command=self.load_image)
            self.load_button.pack()
            self.load_button = tk.Button(master, text="Rotate", command=self.rotate_image)
            self.load_button.pack()
            self.zoom_slider = tk.Scale(master, from_=1, to=9, orient=tk.HORIZONTAL)
            self.zoom_slider.pack()
            self.zoom_slider.set(5)
            self.load_button = tk.Button(master, text="Predict", command=self.predict)
            self.load_button.pack()

    def load_model(self):
        """Load the Keras model; show an error label and return False on failure."""
        if os.path.isfile(MODEL_PATH):
            self.model = models.load_model(MODEL_PATH)
        else:
            self.error_label = tk.Label(self.master, text="Model was not found")
            self.error_label.pack()
            return False
        return True

    def load_image(self):
        """Ask the user for an image file and display it."""
        path = askopenfilename(filetypes=IMAGE_FORMATS)
        self.image_text.set(os.path.split(path)[1])
        self.image = Image.open(path)
        self.display(self.image)

    def rotate_image(self):
        """Rotate the loaded image 90 degrees clockwise; no-op if none loaded."""
        if not 'image' in self.__dict__:
            return
        self.image = self.image.rotate(-90, expand=True)
        self.display(self.image)

    def display(self, image):
        """Scale *image* to fit within SIZE and show it, clearing old predictions."""
        w, h = image.size
        self.display_w, self.display_h = (SIZE, h * SIZE // w) if w > h else (w * SIZE // h, SIZE)
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (use
        # Image.LANCZOS there) -- confirm the pinned Pillow version.
        self.display_image = ImageTk.PhotoImage(image.resize((self.display_w, self.display_h), Image.ANTIALIAS))
        self.image_label.configure(image=self.display_image)
        self.prediction_text.set('')

    def predict(self):
        """Run the sliding-window detector over the loaded image.

        Draws green boxes and labels for confident groups, red boxes for
        ambiguous ones, and lists the accepted labels under the image.
        """
        if not 'image' in self.__dict__:
            return
        cropped, bboxes, valid, valid_bboxes, groups, labels = [], [], [], [], [], []
        image = np.array(self.image.convert('L'), np.uint8)
        h, w = image.shape
        thickness = int(w*0.005)
        # Fit the largest window with the model's aspect ratio inside the image.
        if w / h < RATIO:
            width = w
            height = width / RATIO
        else:
            height = h
            width = height * RATIO
        width, height = int(width), int(height)
        zoom = 1
        m_zoom = 2 ** (self.zoom_slider.get() / 2)
        # Slide the window across the image at increasing zoom levels,
        # recording each crop and its bbox in original-image coordinates.
        while zoom <= m_zoom:
            scaled_w, scaled_h = int(w * zoom), int(h * zoom)
            overflow_x, overflow_y = abs(width - scaled_w), abs(height - scaled_h)
            coeff = w / scaled_w
            scaled = cv.resize(image, (scaled_w, scaled_h))
            step = int(SLIDE_STEP * scaled_w)
            for i in range(0, overflow_x + step, step):
                for j in range(0, overflow_y + step, step):
                    bboxes.append(((int(i*coeff), int(j*coeff)), (int(i*coeff + width*coeff), int(j*coeff + height*coeff))))
                    cropped.append(cv.resize(scaled[j:j+height, i:i+width], FINAL_SHAPE[:-1]).reshape(FINAL_SHAPE) / 255)
            zoom *= ZOOM_MULT
        predictions = self.model.predict(np.array(cropped))
        img = np.array(self.image)
        # Keep only windows the model flagged as positive (decoded code
        # starts with '1'); code[3:] is the plate text.
        for i, prediction in enumerate(predictions):
            code = decoder(prediction)
            if code[:1] == '1':
                valid.append(code[3:])
                valid_bboxes.append(bboxes[i])
        # Group mutually overlapping detections by index.
        for i, bbox0 in enumerate(valid_bboxes):
            for j, bbox1 in enumerate(valid_bboxes[i+1:]):
                are_overlapping = max(bbox0[0][0], bbox1[0][0]) < min(bbox0[1][0], bbox1[1][0]) and max(bbox0[0][1], bbox1[0][1]) < min(bbox0[1][1], bbox1[1][1])
                if are_overlapping:
                    appended = False
                    for group in groups:
                        if i in group or j+i+1 in group:
                            if not i in group:
                                group.append(i)
                            if not j+i+1 in group:
                                group.append(j+i+1)
                            appended = True
                    if not appended:
                        groups.append([i, j+i+1])
        # Detections that overlap nothing become single-member groups.
        for i, bbox in enumerate(valid_bboxes):
            is_in_group = False
            for group in groups:
                if i in group:
                    is_in_group = True
                    break
            if not is_in_group:
                groups.append([i])
        for group in groups:
            top, bottom, left, right, length = 0, 0, 0, 0, len(group)
            if length == 1:
                # A lone detection is treated as too weak to report.
                print('Unsure about group with a weak match: ' + valid[group[0]])
                continue
            # Per-position character votes; assumes plate codes have at
            # most 8 characters -- TODO confirm against decoder().
            letters, max_probs = [[], [], [], [], [], [], [], []], []
            # Sum the group's box corners (averaged below) and collect votes.
            for index in group:
                left += valid_bboxes[index][0][0]
                top += valid_bboxes[index][0][1]
                right += valid_bboxes[index][1][0]
                bottom += valid_bboxes[index][1][1]
                for i, letter in enumerate(valid[index]):
                    letters[i].append(letter)
            for letter in letters:
                c = Counter(letter)
                max_prob = c.most_common(1)[0][1]
                with_max_prob = []
                # Keep every character tied for the top vote count.
                for pair in c.most_common():
                    if pair[1] == max_prob:
                        with_max_prob.append(pair[0])
                    elif pair[1] < max_prob:
                        break
                max_probs.append(with_max_prob)
            possible = get_possible_label('', max_probs, [])
            if len(possible) >= length // 2:
                # Too many candidate spellings: mark the group as uncertain (red box).
                print('Unsure about group with labels: '+ ', '.join(possible))
                img = cv.rectangle(img, (left//length, top//length), (right//length, bottom//length), (255, 0, 0), thickness=thickness//2)
            else:
                label = '/'.join(possible)
                labels.append(label)
                # Pick the largest font size that still fits inside the averaged box.
                text_width = (right - left) // length
                font_size = 1
                is_too_long = False
                for i in range(1,10):
                    size = cv.getTextSize(label, cv.FONT_HERSHEY_SIMPLEX, i, thickness=thickness)[0]
                    if size[0] < text_width:
                        font_size = i
                        text_height = size[1]
                    else:
                        is_too_long = i == 1
                        break
                img = img if is_too_long else cv.putText(img, label, (left//length + thickness, top//length + text_height + thickness), cv.FONT_HERSHEY_SIMPLEX, font_size, (0, 255, 0), thickness=thickness)
                img = cv.rectangle(img, (left//length, top//length), (right//length, bottom//length), (0, 255, 0), thickness=thickness)
        self.display(Image.fromarray(img))
        self.prediction_text.set('\n'.join(labels))
# Launch the GUI only when executed as a script; importing this module
# no longer opens a window as a side effect.
if __name__ == '__main__':
    root = tk.Tk()
    App(root)
    root.mainloop()
|
import math

# Dirac impulse input: one unit sample kicks the resonator into motion.
dirac = [0 for i in range(1000)]
dirac[100] = 1  # needs a Dirac impulse to start swinging

amp = 0.995  # exponentially increasing amplitude
#amp = 1  # constant amplitude
#amp = 1.005  # exponentially decreasing amplitude

freq = 137  # a lovely frequency for a 1000 long array

# Bug fix: the original inlined two DIFFERENT hand-typed approximations of
# pi (3.14159 for sin, 3.141459 for cos -- a typo), so the two halves of
# the recurrence ran at slightly different frequencies. It also never
# normalized by the signal length, making sin(2*pi*137) ~ 0 and killing
# the oscillation. Use math.pi and cycles-per-array-length instead.
omega = 2 * math.pi * freq / len(dirac)

sine = [0, 0]  # initialized to ease indexing
for i in range(1, len(dirac)):
    # Two-pole resonator driven by the impulse; dividing by amp**2 makes
    # the envelope grow for amp < 1 (see the amp options above).
    sine.append((amp * math.sin(omega) * dirac[i] - sine[i - 1]
                 + 2 * amp * math.cos(omega) * sine[i]) / amp ** 2)

import matplotlib.pyplot as plt

plt.rcParams["figure.figsize"] = [7.50, 3.50]
plt.rcParams["figure.autolayout"] = True

plt.title("Sine with Z-transform")
plt.plot(sine, color="blue")
plt.show()
|
from django.urls import include, path
from django.conf.urls import include, url
from rest_framework import routers
from . import views
router = routers.DefaultRouter()
#router.register(r'heroes', views.HeroViewSet)

# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
    # Catch-all: every request path is forwarded to the gateway view.
    url(r'.*', views.gateway.as_view())
]
"""This gathers information about the random number generator function so that
it can be seeded to give any particular series of virtual coin tosses up to a
certain length.
"""
import random
FAIR_COIN = lambda: random.randint(0, 1)
RANDOM_SEED = random.seed
def has_all_sequences(seeds_for_sequences, length):
    """Return True if we have all permutations up to the length specified.

    There are 2**k sequences of each length k, so lengths 1..length give
    2**(length + 1) - 2 sequences in total.
    """
    expected_count = 2 ** (length + 1) - 2
    return len(seeds_for_sequences) == expected_count
def sequence_for_seed(rand_func, seed_func, seed_val, length):
    """Return a list of booleans generated by rand_func after seeding.

    rand_func: zero-arg toss function (e.g. FAIR_COIN).
    seed_func: seeding function (e.g. RANDOM_SEED).
    seed_val: seed applied before drawing.
    length: number of tosses to draw.
    """
    seed_func(seed_val)
    return [rand_func() for _ in range(length)]
def find_seeds_for_sequences(max_length, seed_func, rand_func):
    """Return a dict mapping sequences to the random seed required to
    generate them.

    max_length: cover every sequence length from 1 to max_length.
    seed_func: seeding callable (e.g. RANDOM_SEED).
    rand_func: zero-arg toss callable (e.g. FAIR_COIN).

    The example below matches the signature's argument order (the old
    doctest passed rand_func first). Exact seed values depend on the RNG
    implementation, so the example is not executed.

    >>> find_seeds_for_sequences(2, RANDOM_SEED, FAIR_COIN)  # doctest: +SKIP
    {(0,): 1, (0, 0): 9, (0, 1): 8, (1,): 0, (1, 0): 10, (1, 1): 6}
    """
    seeds_for_sequences = {}
    for length in range(1, max_length + 1):
        # Try seeds 0, 1, 2, ... until every sequence up to this length
        # has a known seed.
        seed_val = 0
        while not has_all_sequences(seeds_for_sequences, length):
            sequence = sequence_for_seed(rand_func, seed_func, seed_val, length)
            seeds_for_sequences[tuple(sequence)] = seed_val
            seed_val += 1
    return seeds_for_sequences
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from io import BytesIO
from amazon.ion.writer import blocking_writer
from amazon.ion.writer_binary import binary_writer
from amazon.ion.writer_text import raw_writer
from ionhash.hasher import hash_writer
from .util import binary_reader_over
from .util import consume
from .util import hash_function_provider
def test_hash_writer():
    """hash_writer must mirror a plain writer's event responses and bytes."""
    ion_str = '[1, 2, {a: 3, b: (4 {c: 5} 6) }, 7]'
    algorithm = "md5"

    # generate events to be used to write the data
    events = consume(binary_reader_over(ion_str))

    _run_test(_writer_provider("binary"), events, algorithm)
    _run_test(_writer_provider("text"), events, algorithm)
def _run_test(writer_provider, events, algorithm):
    """Write *events* through a plain writer and a hash_writer; both must match."""
    # capture behavior of an ion-python writer
    expected_bytes = BytesIO()
    w = blocking_writer(writer_provider(), expected_bytes)
    expected_write_event_types = _write_to(w, events)

    hw_bytes = BytesIO()
    hw = hash_writer(blocking_writer(writer_provider(), hw_bytes), hash_function_provider(algorithm))
    hw_write_event_types = _write_to(hw, events)

    # assert writer/hash_writer response behavior is identical
    assert hw_write_event_types == expected_write_event_types

    # assert writer/hash_writer produced the same output
    assert hw_bytes.getvalue() == expected_bytes.getvalue()
def _write_to(w, events):
write_event_types = []
for event in events:
write_event_types.append(w.send(event))
return write_event_types
def _writer_provider(type):
def _f():
if type == "binary":
return binary_writer()
elif type == "text":
return raw_writer()
return _f
|
from unittest.mock import patch
import pytest
from telegram.ext import CommandHandler
from autonomia.features import corona
@pytest.mark.vcr
def test_cmd_retrieve_covid_data(update, context):
    """A known country name yields a formatted markdown stats table."""
    with patch.object(update.message, "reply_markdown") as m:
        context.args = ["ireland"]
        corona.cmd_retrieve_covid_data(update, context)
        m.assert_called_with(
            "```\n"
            "Updated 1587151959537\n"
            "Country Ireland\n"
            "Cases 13980\n"
            "Today Cases 709\n"
            "Deaths 530\n"
            "Today Deaths 44\n"
            "Recovered 77\n"
            "Active 13373\n"
            "Critical 156\n"
            "Cases Per One Million 2831\n"
            "Deaths Per One Million 107\n"
            "Tests 90646\n"
            "Tests Per One Million 18358\n"
            "Continent Europe\n"
            "```"
        )
@pytest.mark.vcr
def test_cmd_retrieve_covid_data_not_found(update, context):
    """An unknown country name gets the (Portuguese) usage-hint reply."""
    with patch.object(update.message, "reply_text") as m:
        context.args = ["omg-ponneys"]
        corona.cmd_retrieve_covid_data(update, context)
        m.assert_called_with("omg-ponneys é país agora? \n Faz assim: /corona Brazil")
def test_cmd_retrieve_covid_data_no_country_passed(update, context):
    """Calling /corona with no arguments asks for a country."""
    with patch.object(update.message, "reply_text") as m:
        context.args = []
        corona.cmd_retrieve_covid_data(update, context)
        m.assert_called_with("Esqueceu o país doidao?")
@patch.object(corona, "get_covid_data", side_effect=ValueError("Random Error"))
def test_cmd_retrieve_covid_raise_random_exception(mock_get_covid_data, update, context):
    """When the data fetch blows up, the handler apologizes and re-raises.

    Fix: ``@patch.object`` used as a decorator passes the created mock as a
    positional argument, so the signature must accept it (first, before the
    pytest fixtures) — previously the mock collided with the ``update``
    fixture and the test could not run.
    """
    with patch.object(update.message, "reply_text") as m:
        with pytest.raises(ValueError):
            context.args = ["ie"]
            corona.cmd_retrieve_covid_data(update, context)
        m.assert_called_with("Deu ruim! Morri, mas passo bem")
def test_corona_factory():
    """The factory wires cmd_retrieve_covid_data into a /corona CommandHandler."""
    built = corona.corona_factory()
    assert isinstance(built, CommandHandler)
    assert built.callback == corona.cmd_retrieve_covid_data
    assert built.command == ["corona"]
    assert built.pass_args
|
import os
import config
# Compile the subject
def compile(fileName, userID, language):
    """Compile a subject's submission for the given language.

    Parameters
    ----------
    fileName : str
        Source file name; the compiled binary is expected at compiled/<fileName>.
    userID : str
        Value substituted for the [userID] placeholder in the compile command.
    language : str
        Key into config.lang selecting the compiler command template.

    Returns
    -------
    str or None
        "NOLANG" if the language is unknown, "NOCMP" if compilation produced
        no binary, None on success.

    NOTE: shadows the builtin ``compile``; name kept for caller compatibility.
    """
    compiled_path = os.path.join("compiled", fileName)
    # Remove any stale binary left over from a previous run, using the os API
    # instead of shelling out to chmod/rm.
    if os.path.exists(compiled_path):
        os.chmod(compiled_path, 0o777)  # make sure we are allowed to delete it
        os.remove(compiled_path)
    if language not in config.lang:
        return "NOLANG"
    print("Compiling subject's file...")
    compileCMD = config.lang[language]["compile"]
    _replace = [("[subjectFileName]", fileName), ("[userID]", userID)]
    for ph, rep in _replace:
        compileCMD = compileCMD.replace(ph, rep)
    # NOTE(review): the command runs through the shell with interpolated
    # values -- fileName/userID must be trusted or sanitized upstream.
    os.system(compileCMD)
    if not os.path.exists(compiled_path):
        return "NOCMP"
    return None
|
import logging
from sqlalchemy.orm import Session
from pydantic import UUID4
from app.api.crud import user, product
logger = logging.getLogger("genicons").getChild("ml_caller")
async def caller(uid: UUID4, session: Session):
    """Create products for a user (ML model call currently stubbed out).

    Reads the user record and creates a product from the user's own image;
    the real StyleTransfer request is kept below, disabled, for later use.
    """
    logger.info(caller.__name__)
    user_data = user.read(session, uid)
    # Request to StyleTransfer model to make Products -- disabled for now:
    # response = requests.get(f'http://{uid}')
    # data = response.json()
    # product.create(session, uid, data["rs"], data["c"])
    product.create(session, user_data.id, user_data.img, user_data.img)
    logger.info("ml done")
|
"""
Modulo de carga de datos a partir de SQL o un CSV, y construccion de grafos
"""
import time
import networkx as nx
import numpy as np
import pandas as pd
import psycopg2
from h3 import h3
from networkx.algorithms import bipartite
from tqdm import tqdm
import backboning
import funciones as fn
import matplotlib.pyplot as plt
from matplotlib.ticker import PercentFormatter
# Placeholder connection defaults; override via the FromSQL constructor.
USER = "USER"
PASSWORD = "PASSWORD"
DBNAME = "DBNAME"
HOST = "HOST"
PORT = "PORT"


class FromSQL():
    r"""
    Loads data via SQL queries.

    Parameters
    ----------
    user: str
        Database user.
    password: str
        Password.
    dbname: str
        Database name.
    host: str
        Host.
    port: str
        Port.
    """

    def __init__(self, user=USER, password=PASSWORD, dbname=DBNAME, host=HOST, port=PORT):
        self.user = user
        self.password = password
        self.dbname = dbname
        self.host = host
        self.port = port

    def query2df(self, query, index=None):
        r"""
        Run a SQL query and return the result as a DataFrame.

        Parameters
        ----------
        query: str
            SQL query to execute.
        index: str, optional
            Column of the result table to use as the DataFrame index.

        Returns
        -------
        data: DataFrame
            DataFrame with the query results.
        """
        conn = psycopg2.connect(dbname=self.dbname,
                                user=self.user,
                                host=self.host,
                                password=self.password,
                                port=self.port)
        try:
            dat = pd.read_sql_query(query, conn)
        finally:
            # Always release the connection, even if the query fails
            # (previously a failed query leaked the connection).
            conn.close()
        if index is not None:
            dat = dat.set_index(index)
        return dat

    def visits_from_query(self, id_propiedades=None,
                          id_usuarios=None,
                          min_time=None, max_time=None,
                          save=False, savefile=None):
        r"""
        Build a DataFrame of visits restricted to users in ``id_usuarios``,
        properties in ``id_propiedades`` and times in [min_time, max_time).

        Parameters
        ----------
        id_propiedades: list of str, optional
            Properties whose visits are fetched.
        id_usuarios: list of str, optional
            Users whose visits are fetched.
        min_time: str, optional
            Minimum time, formatted like 2013-03-21, inclusive (>=).
        max_time: str, optional
            Maximum time, formatted like 2013-03-21, exclusive (<).
        save: bool
            Whether to save the DataFrame as a CSV file.
        savefile: str, optional
            Target CSV path; when omitted, a name derived from the time
            bounds is used.

        Returns
        -------
        data: DataFrame
            DataFrame with the visits, sorted by creation time.
        """
        def _sql_in_list(ids):
            # str(tuple(ids)) emits a trailing comma for one-element lists,
            # "('a',)", which is invalid SQL -- build the IN-list explicitly.
            return "(" + ", ".join(repr(i) for i in ids) + ")"

        query = ('SELECT id, id_entidad, id_usuario, created_at '
                 'FROM log_acciones '
                 'WHERE id_accion!=1 '
                 'AND id_usuario IS NOT NULL '
                 )
        if id_propiedades is not None:
            query = query + ('AND id_entidad in ' + _sql_in_list(id_propiedades) + ' ')
        if id_usuarios is not None:
            query = query + ('AND id_usuario in ' + _sql_in_list(id_usuarios) + ' ')
        if min_time is not None:
            query = query + ('AND created_at >= \'' + min_time + '\' ')
        if max_time is not None:
            query = query + ('AND created_at < \'' + max_time + '\' ')
        visits = self.query2df(query, index="id")
        visits = visits.sort_values(by="created_at")
        if save:
            if savefile is None:
                # Tolerate missing time bounds instead of crashing on
                # None + str concatenation.
                visits.to_csv("visitas{}{}.csv".format(min_time or "", max_time or ""))
            else:
                visits.to_csv(savefile)
        return visits
def visits_from_csv(savefile):
    r"""
    Load a visits DataFrame from a .csv file.

    Parameters
    ----------
    savefile: str
        Location of the file.

    Returns
    -------
    data: DataFrame
        DataFrame with the visits, indexed by ``id`` and with ``created_at``
        parsed into datetimes.
    """
    frame = pd.read_csv(savefile, index_col="id")
    frame['created_at'] = pd.to_datetime(frame['created_at'])
    return frame
class GraphConstructor():
    def __init__(self, users, props, visits, feats=None, threshold=0, power=1, min_degree=5,
                 filter_method=backboning.disparity_filter):
        r"""
        Class in charge of building the graphs.

        Parameters
        ----------
        users: DataFrame
            DataFrame with users.
        props: DataFrame
            DataFrame with properties.
        visits: DataFrame
            DataFrame with visits. Extension: these need not be visits; any
            DataFrame with one column of user ids and one of property ids works.
        feats: str
            TO DO: features to assign to each node.
        threshold: float or None
            Threshold with which the bipartite graph is filtered.
        power: float
            Power to which the bipartite graph's edge weights are raised.
            (not needed for the bipartite graph)
        min_degree: int
            Minimum degree nodes must have.
        filter_method: backboning method
            Which filtering method to use. (the default one is recommended)

        Attributes
        ---------
        B: NetworkX Graph
            Original bipartite graph.
        Users: NetworkX Graph
            Original projected users graph.
        Props: NetworkX Graph
            Original projected properties graph.
        B_f: NetworkX Graph
            Filtered bipartite graph.
        Users_f: NetworkX Graph
            Filtered projected users graph.
        Props_f: NetworkX Graph
            Filtered projected properties graph.
        users_nodes: list
            List of user nodes.
        prop_nodes: list
            List of property nodes.
        """
        def labeler(node, tipo):
            """
            Gives each node a label saying whether it is house/apartment,
            sale/rental. (could perhaps be removed in favour of
            set_features_from_df)
            """
            data_type = {0: props, 1: users}
            df = data_type[tipo]
            dic_labeler = {(0, 0): 0, (0, 1): 1, (1, 0): 2, (1, 1): 3}

            def f(t):
                # Buckets an id in [1, 2] into 0/1; t == 1.5 takes the first
                # branch. Values outside [1, 2] fall through and return None.
                if 1.5 <= t and t <= 2:
                    return 1
                if 1 <= t and t <= 1.5:
                    return 0
            if tipo == 0:
                x = f(df.loc[node]["id_tipo_propiedad"])
                y = f(df.loc[node]["id_modalidad"])
            if tipo == 1:
                # User node ids were negated below, so flip the sign back to
                # index the users DataFrame.
                x = f(df.loc[-1*node]["id_tipo_propiedad"])
                y = f(df.loc[-1*node]["id_modalidad"])
            return dic_labeler[x, y]
        # One row per (property, user) pair, weighted by the visit count.
        visits = visits.groupby(
            ["id_entidad", 'id_usuario']).size().reset_index()
        # avoids ID collisions between users and properties
        visits["id_usuario"] = -1*visits["id_usuario"]
        self.B = nx.Graph()
        self.B.add_nodes_from(pd.unique(visits["id_entidad"]), bipartite=0)
        self.B.add_nodes_from(pd.unique(visits["id_usuario"]), bipartite=1)
        feat_dict = {node: [labeler(node, attrs["bipartite"])]
                     for node, attrs in self.B.nodes(data=True)}
        nx.set_node_attributes(self.B, values=feat_dict, name="label")
        if feats is not None:
            #nx.set_node_attributes(self.B, values=0, name="feature")
            pass
        self.B.add_weighted_edges_from(visits.values)
        # remove nodes whose degree is <= min_degree (note: inclusive bound)
        dangling = [node for node in self.B.nodes if self.B.degree(
            node) <= min_degree]
        self.B.remove_nodes_from(dangling)
        # Keep only the giant connected component.
        # NOTE(review): nx.connected_component_subgraphs was removed in
        # networkx 2.4 -- this code appears to require networkx < 2.4; confirm
        # the pinned version.
        self.B = max(nx.connected_component_subgraphs(self.B), key=len)
        self.B.graph['tipo'] = "bipartite"
        self.prop_nodes = [n for n, d in self.B.nodes(
            data=True) if d['bipartite'] == 0]
        self.user_nodes = [n for n, d in self.B.nodes(
            data=True) if d['bipartite'] == 1]
        # Filtered/projected graphs start empty and are filled on demand.
        self.B_f = nx.Graph()
        self.Props = nx.Graph()
        self.Users = nx.Graph()
        self.Props_f = nx.Graph()
        self.Users_f = nx.Graph()
        self.filter_weights("bipartite", threshold=threshold,
                            power=power, filter_method=filter_method)

    def project(self, project_on, threshold=0, power=1, weight_function="jaccard",
                filter_method=backboning.disparity_filter):
        r"""
        Build the projection of the bipartite graph onto users or properties.

        Parameters
        ----------
        project_on: str
            Project onto "users" or "props".
        threshold: float or None
            Edge-filtering threshold in [0, 1]; 0 filters nothing and 1
            filters every edge.
        power: float
            Power to which the edge weights are raised.
        weight_function: str
            Weight function for edge weights; either "jaccard" or "maximo"
            (maximum).
        filter_method: backboning method
            Which filtering method to use. (the default one is recommended)
        """
        function_dict = {"jaccard": fn.weighted_jaccard,
                         "maximo": fn.weighted_maximum}
        nodes_dict = {"users": self.user_nodes, "props": self.prop_nodes}
        nodes = nodes_dict[project_on]
        D = bipartite.projection.generic_weighted_projected_graph(
            self.B, nodes, weight_function=function_dict[weight_function])
        # Keep only the giant component of the projection.
        D = max(nx.connected_component_subgraphs(D), key=len)
        D.graph['tipo'] = project_on
        D.graph['function'] = weight_function
        if project_on == "users":
            # Undo the sign flip applied to user ids in __init__.
            mapping = {node: -1*node for node in D}
            self.Users = D
            self.Users = nx.relabel_nodes(self.Users, mapping=mapping)
        if project_on == "props":
            self.Props = D
        self.filter_weights(project_on, threshold=threshold,
                            power=power, filter_method=filter_method)

    def filter_weights(self, filter_on, threshold=None, power=1, filter_method=backboning.disparity_filter):
        r"""
        Filter the bipartite graph or one of its projections.

        Parameters
        ----------
        filter_on: str
            Filter the "users", "props" or "bipartite" graph.
        threshold: float or None
            Edge-filtering threshold in [0, 1]; 0 filters nothing and 1
            filters every edge. None skips filtering and only re-applies
            the weight power on the already-filtered graph.
        power: float
            Power to which the edge weights are raised.
        filter_method: backboning method
            Which filtering method to use. (the default one is recommended)
        """
        # source: original graph, filtered: filtered (target) graph
        dic_source = {"bipartite": self.B,
                      "users": self.Users, "props": self.Props}
        source = dic_source[filter_on]
        dic_filtered = {"bipartite": self.B_f,
                        "users": self.Users_f, "props": self.Props_f}
        filtered = dic_filtered[filter_on]
        # filtering
        # D: scratch graph
        if threshold is None:
            # with threshold None there is nothing to filter; only the weight
            # power needs updating
            D = filtered
        if threshold is not None:
            # backboning needs a DataFrame shaped src, trg, nij, where src and
            # trg are nodes and nij is the edge weight
            D = source
            table = nx.to_pandas_edgelist(D)
            table.columns = ["src", "trg", "nij"]
            nc_table = filter_method(table, undirected=True)
            nc_backbone = backboning.thresholding(nc_table, threshold)
            nc_backbone.columns = ["src", "trg", "weight", "score"]
            D = nx.from_pandas_edgelist(nc_backbone, "src", "trg", "weight")
        # raise surviving edges' weights to the requested power
        for *edge, data in source.edges(data=True):
            if edge in D.edges():
                weight = data['weight']
                weight = weight**power
                D.add_edge(*edge, weight=weight)
        if len(D) != 0:
            # NOTE(review): removed in networkx 2.4 (see __init__).
            D = max(nx.connected_component_subgraphs(D), key=len)
        D.graph['tipo'] = filter_on
        # Carry the node attributes over from the source graph.
        attrs_dict = {node: att for node, att in source.nodes(data=True)}
        # assignment back onto the matching attribute
        if filter_on == "bipartite":
            self.B_f = D
            nx.set_node_attributes(self.B_f, values=attrs_dict)
            self.B_f.graph["threshold"] = threshold
            self.B_f.graph["power"] = power
        if filter_on == "users":
            self.Users_f = D
            nx.set_node_attributes(self.Users_f, values=attrs_dict)
            self.Users_f.graph["threshold"] = threshold
            self.Users_f.graph["power"] = power
        if filter_on == "props":
            self.Props_f = D
            nx.set_node_attributes(self.Props_f, values=attrs_dict)
            self.Props_f.graph["threshold"] = threshold
            self.Props_f.graph["power"] = power

    def get_histogram(self, hist_on, filtered=True):
        r"""
        Get the edge-weight histogram data of a graph.

        Parameters
        ----------
        hist_on: str
            Histogram of the "users", "props" or "bipartite" graph.
        filtered: bool
            Whether to use the filtered graph.

        Returns
        -------
        hist: list
            Edge weights of the selected graph.
        """
        dic = {True: {"bipartite": self.B_f, "users": self.Users_f, "props": self.Props_f},
               False: {"bipartite": self.B, "users": self.Users, "props": self.Props}}
        D = dic[filtered][hist_on]
        hist = [edge[2]['weight'] for edge in D.edges(data=True)]
        return hist

    def set_features_from_graph(self, on, filtered=True, mode="adj", name="feature"):
        r"""
        Set node features from graph properties: adjacency matrix, one-hot
        vector or topological measures.

        Parameters
        ----------
        on: str
            Set on the "users", "props" or "bipartite" graph.
        filtered: bool
            Whether to use the filtered graph.
        mode: str
            Which feature to assign: "adj", "onehot" or "topos".
        name: str
            Name under which the feature is stored.
        """
        dic = {True: {"bipartite": self.B_f, "users": self.Users_f, "props": self.Props_f},
               False: {"bipartite": self.B, "users": self.Users, "props": self.Props}}
        D = dic[filtered][on]
        if mode == "onehot":
            def dirac(i, j):
                if i == j:
                    return 1
                else:
                    return 0
            for i, node in enumerate(D.nodes):
                D.nodes[node][name] = [dirac(i, j) for j in range(len(D))]
        elif mode == "adj":
            # Dense adjacency row per node.
            A = nx.adjacency_matrix(D).todense().A
            for i, node in enumerate(D.nodes):
                D.nodes[node][name] = list(A[i])
        elif mode == "topos":
            # Sampled betweenness (k=100 pivots) plus other centralities.
            bb = nx.betweenness_centrality(D, k=100)
            deg = nx.degree(D)
            pr = nx.pagerank(D)
            clust = nx.clustering(D)
            eig = nx.eigenvector_centrality(D)
            for i, node in enumerate(D.nodes):
                D.nodes[node][name] = [bb[node], deg[node],
                                       pr[node], clust[node], eig[node]]
        #nx.set_node_attributes(self.B, values=0, name="feature")

    def set_features_from_df(self, on, filtered=True, df=pd.DataFrame(), name="feature"):
        r"""
        Set node features from a DataFrame.

        Parameters
        ----------
        on: str
            Set on the "users", "props" or "bipartite" graph.
        filtered: bool
            Whether to use the filtered graph.
        df: DataFrame
            DataFrame indexed by node whose columns are the values to assign.
            NOTE(review): the default is a shared mutable DataFrame; it is not
            mutated here, but passing df explicitly is safer.
        name: str
            Name under which the feature is stored.
        """
        dic = {True: {"bipartite": self.B_f, "users": self.Users_f, "props": self.Props_f},
               False: {"bipartite": self.B, "users": self.Users, "props": self.Props}}
        D = dic[filtered][on]
        for i, node in enumerate(D.nodes):
            D.nodes[node][name] = df.loc[node].values
        #nx.set_node_attributes(self.B, values=0, name="feature")

    def set_graphs(self, G, on, filtered):
        r"""
        Replace one of the stored graphs with one obtained elsewhere.

        Parameters
        ----------
        G: NetworkX Graph
            Graph to substitute in.
        on: str
            The "users", "props" or "bipartite" graph.
        filtered: bool
            Whether to replace the filtered variant.
        """
        if filtered:
            if on == "bipartite":
                self.B_f = G
            if on == "users":
                self.Users_f = G
            if on == "props":
                self.Props_f = G
        if not filtered:
            if on == "bipartite":
                self.B = G
                # NOTE(review): rebuilt as sets here while __init__ builds
                # lists -- confirm downstream code accepts either.
                self.prop_nodes = {n for n, d in self.B.nodes(
                    data=True) if d['bipartite'] == 0}
                self.user_nodes = {n for n, d in self.B.nodes(
                    data=True) if d['bipartite'] == 1}
            if on == "users":
                self.Users = G
            if on == "props":
                self.Props = G

    def find_best_filter(self,on):
        # TODO: not implemented yet.
        pass

    def find_best_power(self,on):
        # TODO: not implemented yet.
        pass

    def plot_histogram(self, on, filtered=True, weighted=True, **kwargs):
        r"""
        Plot the edge-weight histogram of a graph.

        Parameters
        ----------
        on: str
            Histogram of the "users", "props" or "bipartite" graph.
        filtered: bool
            Whether to use the filtered graph.
        weighted: bool
            Whether the plot should be normalized. (should be True most of
            the time)
        """
        dic_name={"users":"Usuarios","props":"Propiedades","bipartite": "Bipartito"}
        hist=self.get_histogram(on,filtered)
        fig, ax = plt.subplots()
        if weighted:
            # Normalize so the bars sum to 1 (shown as percentages below).
            weights=np.ones(len(hist))/len(hist)
        else:
            weights=None
        histo=ax.hist(hist,weights=weights,**kwargs)
        ax.set_xlabel("Peso")
        ax.set_ylabel("Frecuencia")
        ax.set_title("Distribucion Pesos Grafo "+dic_name[on])
        plt.gca().yaxis.set_major_formatter(PercentFormatter(1))
        return fig, ax, histo

    def plot_std(self, on, powers=np.linspace(0,1,11)):
        r"""
        Plot the standard deviation of a graph's edge weights for several
        weight powers.

        Parameters
        ----------
        on: str
            The "users", "props" or "bipartite" graph.
        powers: np.array
            Powers to plot.
        """
        dic_name={"users":"Usuarios","props":"Propiedades","bipartite": "Bipartito"}
        std=[]
        for p in tqdm(powers):
            # Re-apply the power, then measure the filtered graph's weights.
            self.filter_weights(on,power=p)
            hist=self.get_histogram(on,filtered=True)
            std.append(np.std(hist))
        fig, ax = plt.subplots()
        plot=ax.plot(powers,std)
        ax.set_title("Desviacion Estandar "+dic_name[on])
        ax.set_xlabel("p")
        ax.set_ylabel("Desviacion Estandar")
        return fig, ax, plot

    def plot_filters(self,on, filters=None):
        r"""
        Plot coverage vs. relative edge count for several filter values.

        Parameters
        ----------
        on: str
            The "users", "props" or "bipartite" graph.
        filters: np.array
            Filter values to plot; a log scale containing both 1 and 0 is
            recommended.
        """
        if filters is None:
            filters=np.logspace(-1,0,num=9)
            filters=np.insert(filters,0,0)
            filters=1-filters
        tipo_dict={"bipartite":self.B,"users":self.Users,"props":self.Props}
        dic_name={"users":"Usuarios","props":"Propiedades","bipartite": "Bipartito"}
        edgtotal=len(tipo_dict[on].edges)
        # Only nodes of degree > 1 count towards coverage.
        nodetotal=len([node for node in tipo_dict[on].nodes if tipo_dict[on].degree(node) > 1])
        coverages=[]
        edges=[]
        nodes=[]
        for alpha in tqdm(filters):
            self.filter_weights(on,threshold=alpha)
            tipo_f_dict={"bipartite":self.B_f,"users":self.Users_f,"props":self.Props_f}
            # NOTE(review): degree is read from the *unfiltered* graph here
            # (tipo_dict, not tipo_f_dict) -- confirm that is intentional.
            non_isolated=len([node for node in tipo_f_dict[on].nodes if tipo_dict[on].degree(node) > 1])
            coverage=non_isolated/nodetotal
            edges.append(len(tipo_f_dict[on].edges))
            nodes.append(len(tipo_f_dict[on]))
            coverages.append(coverage)
        edge_relativo=[i/edgtotal for i in edges]
        print(edge_relativo,coverages)
        fig, ax = plt.subplots()
        scatter=plt.scatter(edge_relativo,coverages,c=filters, cmap="winter")
        plot=ax.plot(edge_relativo,coverages,linewidth=1,linestyle='dashed')
        ax.set_title("Filtrado "+dic_name[on])
        ax.set_xlabel("Enlaces Relativos")
        ax.set_ylabel("Cobertura")
        plt.colorbar()
        return fig, ax, scatter, plot
def add_user_attributes(users, props, visits, std=False):
    r"""
    Assign features to users from the properties they visited.

    Every property column is averaged over each user's visits and merged into
    the users DataFrame (optionally with standard deviations), and H3 cell
    ids at resolutions 6, 8 and 10 are derived from each user's lat/lon.

    Parameters
    ----------
    users: DataFrame
        Users DataFrame, indexed by user id; must provide the columns
        'ubicacion_latitud' and 'ubicacion_longitud'.
    props: DataFrame
        Properties DataFrame, indexed by property id.
    visits: DataFrame
        Visits DataFrame with 'id_entidad' (property) and 'id_usuario' columns.
    std: bool
        Whether to also add the standard deviations of the features.

    Returns
    -------
    users: DataFrame
        Users DataFrame enriched with the visit-averaged features and the
        '6h3', '8h3' and '10h3' H3 cell columns.
    """
    # Attach the visited property's columns to every visit row.
    visits_labeled = visits.merge(
        props, left_on="id_entidad", right_index=True)
    features = list(visits_labeled.columns)
    features.remove("id_entidad")
    features.remove("id_usuario")
    users_labeled = users
    for feat in features:
        # Mean of the feature over each user's visits.
        grouped_visits_labeled = visits_labeled.groupby(
            "id_usuario")[feat].apply(np.mean).to_frame()
        users_labeled = users_labeled.merge(
            grouped_visits_labeled, right_index=True, left_index=True)
        if std:
            # Same aggregation but the standard deviation, suffixed '_std'.
            grouped_visits_labeled = visits_labeled.groupby(
                "id_usuario")[feat].apply(np.std).to_frame()
            users_labeled = users_labeled.merge(
                grouped_visits_labeled, right_index=True, left_index=True, suffixes=(None, '_std'))
    users = users_labeled
    visits = visits_labeled
    # H3 hexagonal cell ids at increasing resolutions, from the user's location.
    users["6h3"] = users.apply(lambda x: h3.geo_to_h3(
        x['ubicacion_latitud'], x['ubicacion_longitud'], 6), axis=1)
    users["8h3"] = users.apply(lambda x: h3.geo_to_h3(
        x['ubicacion_latitud'], x['ubicacion_longitud'], 8), axis=1)
    users["10h3"] = users.apply(lambda x: h3.geo_to_h3(
        x['ubicacion_latitud'], x['ubicacion_longitud'], 10), axis=1)
    return users
|
# Write your code here
from collections import defaultdict
class Graph:
    """Undirected graph over vertices 0..V-1, backed by adjacency lists."""

    def __init__(self, V):
        self.V = V  # number of vertices
        self.graph = defaultdict(list)  # vertex -> list of neighbours

    def DFSUtil(self, temp, v, visited):
        """Depth-first visit from *v*, appending reached vertices to *temp*."""
        visited[v] = True
        temp.append(v)
        for neighbour in self.graph[v]:
            if not visited[neighbour]:
                temp = self.DFSUtil(temp, neighbour, visited)
        return temp

    def addEdge(self, v, w):
        """Add the undirected edge v--w."""
        self.graph[v].append(w)
        self.graph[w].append(v)

    def connectedcomponents(self):
        """Return the connected components as vertex lists in discovery order."""
        visited = [False] * self.V
        components = []
        for v in range(self.V):
            if not visited[v]:
                components.append(self.DFSUtil([], v, visited))
        return components
if __name__ == "__main__":
    # Input: n vertices, m edges, f = group size divisor.
    n,m,f = map(int,input().split())
    # Vertices are 1-indexed in the input, hence n + 1 slots.
    g = Graph(n+1)
    while m > 0 :
        u,v = map(int,input().split())
        g.addEdge(u,v)
        m -= 1
    cc = g.connectedcomponents()
    # Size of the largest connected component.
    maximum = 0
    for i in cc :
        if len(i) > maximum :
            maximum = len(i)
    # Effectively prints max(1, maximum // f): floor division, with a zero
    # quotient bumped up to 1. Intent presumed from the branch structure --
    # TODO confirm against the original problem statement.
    if maximum%f == 0 :
        print(maximum//f)
    elif maximum//f == 0 :
        print("1")
    else :
        print(maximum//f)
|
"""
Copyright 2017 Robin Verschueren
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cvxpy.atoms import (MatrixFrac, Pnorm, QuadForm, abs, bmat, conj, conv,
cumsum, imag, kron, lambda_max, lambda_sum_largest,
log_det, norm1, norm_inf, quad_over_lin, real,
reshape, sigma_max, trace, upper_tri,)
from cvxpy.atoms.affine.add_expr import AddExpression
from cvxpy.atoms.affine.binary_operators import (DivExpression, MulExpression,
multiply,)
from cvxpy.atoms.affine.diag import diag_mat, diag_vec
from cvxpy.atoms.affine.hstack import Hstack
from cvxpy.atoms.affine.index import index, special_index
from cvxpy.atoms.affine.promote import Promote
from cvxpy.atoms.affine.sum import Sum
from cvxpy.atoms.affine.transpose import transpose
from cvxpy.atoms.affine.unary_operators import NegExpression
from cvxpy.atoms.affine.vstack import Vstack
from cvxpy.atoms.norm_nuc import normNuc
from cvxpy.constraints import (PSD, SOC, Equality, Inequality, NonNeg, NonPos,
Zero,)
from cvxpy.expressions.constants import Constant, Parameter
from cvxpy.expressions.variable import Variable
from cvxpy.reductions.complex2real.atom_canonicalizers.abs_canon import (
abs_canon,)
from cvxpy.reductions.complex2real.atom_canonicalizers.aff_canon import (
binary_canon, conj_canon, imag_canon, real_canon, separable_canon,)
from cvxpy.reductions.complex2real.atom_canonicalizers.constant_canon import (
constant_canon,)
from cvxpy.reductions.complex2real.atom_canonicalizers.equality_canon import (
equality_canon, zero_canon,)
from cvxpy.reductions.complex2real.atom_canonicalizers.inequality_canon import (
inequality_canon, nonneg_canon, nonpos_canon,)
from cvxpy.reductions.complex2real.atom_canonicalizers.matrix_canon import (
hermitian_canon, lambda_sum_largest_canon, matrix_frac_canon,
norm_nuc_canon, quad_canon, quad_over_lin_canon,)
from cvxpy.reductions.complex2real.atom_canonicalizers.param_canon import (
param_canon,)
from cvxpy.reductions.complex2real.atom_canonicalizers.pnorm_canon import (
pnorm_canon,)
from cvxpy.reductions.complex2real.atom_canonicalizers.psd_canon import (
psd_canon,)
from cvxpy.reductions.complex2real.atom_canonicalizers.soc_canon import (
soc_canon,)
from cvxpy.reductions.complex2real.atom_canonicalizers.variable_canon import (
variable_canon,)
# Dispatch table mapping each atom/expression/constraint class to the routine
# that rewrites it from complex to real form (used by the Complex2Real
# reduction).
CANON_METHODS = {
    # Affine atoms that act separately on real and imaginary parts.
    AddExpression: separable_canon,
    bmat: separable_canon,
    cumsum: separable_canon,
    diag_mat: separable_canon,
    diag_vec: separable_canon,
    Hstack: separable_canon,
    index: separable_canon,
    special_index: separable_canon,
    Promote: separable_canon,
    reshape: separable_canon,
    Sum: separable_canon,
    trace: separable_canon,
    transpose: separable_canon,
    NegExpression: separable_canon,
    upper_tri: separable_canon,
    Vstack: separable_canon,
    # Binary atoms whose complex expansion mixes both operands.
    conv: binary_canon,
    DivExpression: binary_canon,
    kron: binary_canon,
    MulExpression: binary_canon,
    multiply: binary_canon,
    # Elementwise complex primitives.
    conj: conj_canon,
    imag: imag_canon,
    real: real_canon,
    # Leaf expressions.
    Variable: variable_canon,
    Constant: constant_canon,
    Parameter: param_canon,
    # Constraints.
    # NOTE(review): NonPos/NonNeg route to inequality_canon even though
    # nonpos_canon/nonneg_canon are imported above -- confirm intended.
    Inequality: inequality_canon,
    NonPos: inequality_canon,
    NonNeg: inequality_canon,
    PSD: psd_canon,
    SOC: soc_canon,
    Equality: equality_canon,
    Zero: equality_canon,
    # Norms, quadratics and spectral atoms.
    abs: abs_canon,
    norm1: pnorm_canon,
    norm_inf: pnorm_canon,
    Pnorm: pnorm_canon,
    lambda_max: hermitian_canon,
    # NOTE(review): log_det routed through norm_nuc_canon -- presumably both
    # reduce via a Hermitian decomposition; verify.
    log_det: norm_nuc_canon,
    normNuc: norm_nuc_canon,
    sigma_max: hermitian_canon,
    QuadForm: quad_canon,
    quad_over_lin: quad_over_lin_canon,
    MatrixFrac: matrix_frac_canon,
    lambda_sum_largest: lambda_sum_largest_canon,
}
|
import ctypes
import inspect
import threading
from functools import partial
from typing import Any, Callable
from tortfunc.exceptions import FunctionTimeOut
def threaded_timeout(function: Callable[..., Any], timeout: float) -> Callable[..., Any]:
    """Wrap *function* so that a call is abandoned after *timeout* seconds.

    The wrapped function runs in a KillableThread. If it finishes in time,
    its return value is passed through and any exception it raised is
    re-raised in the caller; if the deadline expires, the worker thread is
    terminated and FunctionTimeOut is raised.
    """
    def thread_kill_func(*args, **kwargs):
        # Boxes for passing the worker's outcome back to this thread. The
        # gibberish-looking first parameter names avoid colliding with the
        # wrapped function's own kwargs when bound via partial().
        def returnable_func(asfasfvzc: list, qwzxcvbnm: list, *targs, **tkwargs):
            try:
                asfasfvzc.append(function(*targs, **tkwargs))
            except Exception as exc:
                # Record the failure instead of letting it die with the thread
                # (previously the call silently returned None on error).
                qwzxcvbnm.append(exc)
        ret = []
        err = []
        target_func = partial(returnable_func, ret, err)
        kill_thread = KillableThread(target=target_func, args=args, kwargs=kwargs)
        kill_thread.start()
        kill_thread.join(timeout=timeout)
        if kill_thread.is_alive():
            kill_thread.terminate()
            raise FunctionTimeOut()
        if err:
            # Surface the worker's exception in the calling thread.
            raise err[0]
        if ret:
            return ret[0]
    return thread_kill_func
def _async_raise(tid, exctype):
"""raises the exception, performs cleanup if needed"""
if not inspect.isclass(exctype):
raise TypeError("Only types can be raised (not instances)")
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), ctypes.py_object(exctype))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), 0)
raise SystemError("PyThreadState_SetAsyncExc failed")
class KillableThread(threading.Thread):
    """A thread that can be manually killed by raising an exception.

    Based on code from: http://tomerfiliba.com/recipes/Thread2/
    """

    def _get_my_tid(self):
        """Determine this (self's) thread id, caching it once found.

        Raises
        ------
        threading.ThreadError
            If the thread has not been started or has already finished.
        """
        # Fix: Thread.isAlive() was removed in Python 3.9; is_alive() is the
        # supported name on all Python 3 versions.
        if not self.is_alive():
            raise threading.ThreadError("the thread is not active")
        # do we have it cached?
        if hasattr(self, "_thread_id"):
            return self._thread_id
        # no, look for it in the _active dict
        # NOTE(review): threading._active is a private CPython structure.
        for tid, tobj in threading._active.items():
            if tobj is self:
                self._thread_id = tid
                return tid
        raise AssertionError("could not determine the thread's id")

    def raise_exc(self, exctype):
        """Raise the given exception type in the context of this thread."""
        _async_raise(self.ident, exctype)

    def terminate(self):
        """Raise SystemExit in the context of this thread, which should cause
        it to exit silently (unless caught)."""
        self.raise_exc(SystemExit)
|
import os
def test_import_iris():
    """iris imports and reports a three-part version string."""
    import iris
    parts = iris.__version__.split(".")
    assert len(parts) == 3
def test_import_netCDF4():
    """netCDF4 imports and reports a three-part version string."""
    import netCDF4
    version = netCDF4.__version__
    assert version.count(".") == 2
def test_import_netcdftime():
    """netcdftime imports and reports a three-part version string."""
    import netcdftime
    parts = netcdftime.__version__.split(".")
    assert len(parts) == 3
def test_import_gdal():
    """Smoke test: the GDAL Python bindings can be imported."""
    import gdal
def test_import_matplotlib():
    """Smoke test: matplotlib imports, renders and saves a simple plot."""
    import matplotlib
    matplotlib.use('Agg')  # headless backend; no display needed
    import matplotlib.pyplot as plt
    import os
    import tempfile
    plt.plot(range(5))
    # Use a unique temp file rather than the fixed /tmp/test.png, which can
    # collide between concurrent runs or fail on permissions.
    fd, plotfile = tempfile.mkstemp(suffix=".png")
    os.close(fd)
    try:
        plt.savefig(plotfile)
        assert os.path.isfile(plotfile)
        assert os.path.getsize(plotfile) > 0
    finally:
        os.remove(plotfile)
|
"""Test str render module."""
import hashlib
import unittest
from bootstrap4c4d.classes.description import Description
from bootstrap4c4d.reducers.str import reduce_strings
from bootstrap4c4d.render.str import render_strings
class TestStringsRender(unittest.TestCase):
    """Tests for render_strings applied to a reduced Description."""

    def test_render_strings(self):
        # Minimal description carrying a single US-English locale string.
        description = Description({
            "id": "MY_DESCRIPTION",
            "key": "CONTAINER",
            "value": None,
            "locales": {
                "strings_us": "My Description"
            }
        })
        result = render_strings(reduce_strings(description))
        # One Cinema4D-style STRINGTABLE block per locale is expected.
        expected_result = {
            "strings_us": """STRINGTABLE MY_DESCRIPTION
{
MY_DESCRIPTION "My Description";
}"""
        }
        self.assertDictEqual(result, expected_result)
|
"""
Functions associated with NetCDF for tracker.
"""
from lo_tools import Lfun
import xarray as xr
# Info for NetCDF output, organized as {variable name: (long_name, units)}
name_unit_dict = {'lon':('Longitude','degrees'), 'lat':('Latitude','degrees'),
        'cs':('Fractional Z','Dimensionless'), 'ot':('Ocean Time',Lfun.roms_time_units),
        'z':('Z','m'), 'zeta':('Surface Z','m'), 'zbot':('Bottom Z','m'),
        'salt':('Salinity','Dimensionless'), 'temp':('Potential Temperature','Degrees C'),
        'oxygen':('Dissolved Oxygen', 'millimole_oxygen meter-3'),
        'u':('EW Velocity','meters s-1'), 'v':('NS Velocity','meters s-1'),
        'w':('Vertical Velocity','meters s-1'),
        # Fix: 'Vwind' previously reused the water-velocity long_name
        # 'NS Velocity'; it now names the wind component, matching 'Uwind'.
        'Uwind':('EW Wind Velocity','meters s-1'), 'Vwind':('NS Wind Velocity','meters s-1'),
        'h':('Bottom Depth','m'),
        'hit_sidewall':('Hit Sidewall','1=hit'),
        'hit_bottom':('Hit Bottom','1=hit'),
        'hit_top':('Hit Top','1=hit'),
        'bad_pcs':('Bad Pcs','1=bad')}
def write_grid(g_infile, g_outfile):
    """Copy selected grid variables from an input NetCDF file into a small
    standalone grid file."""
    ds_in = xr.open_dataset(g_infile)
    # Variables copied verbatim onto the (eta_rho, xi_rho) grid.
    grid_vars = {
        vn: (('eta_rho', 'xi_rho'), ds_in[vn].values)
        for vn in ['lon_rho', 'lat_rho', 'mask_rho', 'h']
    }
    ds_out = xr.Dataset(grid_vars)
    ds_out.to_netcdf(g_outfile)
    ds_in.close()
    ds_out.close()
def start_outfile(out_fn, P):
    """Write the initial tracker output file from the particle dict *P*.

    'ot' is the 1-D time axis; every other variable is (Time, Particle).
    Variable metadata comes from name_unit_dict.
    """
    data_vars = {}
    for vn in P.keys():
        dims = ('Time',) if vn == 'ot' else ('Time', 'Particle')
        data_vars[vn] = (dims, P[vn])
    ds = xr.Dataset(data_vars)
    for vn in P.keys():
        long_name, units = name_unit_dict[vn]
        ds[vn].attrs['long_name'] = long_name
        ds[vn].attrs['units'] = units
    ds.to_netcdf(out_fn)
    ds.close()
def append_to_outfile(out_fn, P):
    """Append this day's particle output onto the existing output file,
    dropping the duplicated first time level of *P*.

    This works fine, but may slow down as the file grows over many days;
    an alternative would be writing each daily output into a temp dir and
    concatenating with ncrcat at the end.
    """
    data_vars = {}
    for vn in P.keys():
        if vn == 'ot':
            data_vars[vn] = (('Time'), P[vn][1:])
        else:
            data_vars[vn] = (('Time', 'Particle'), P[vn][1:,:])
    ds_new = xr.Dataset(data_vars)
    ds_old = xr.open_dataset(out_fn, decode_times=False)
    ds_all = xr.concat((ds_old, ds_new), 'Time')
    ds_old.close()
    ds_new.close()
    # out_fn is a pathlib.Path; replace the file with the concatenated data.
    out_fn.unlink(missing_ok=True)
    ds_all.to_netcdf(out_fn)
    ds_all.close()
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['Repository']
class Repository(pulumi.CustomResource):
    """An AWS CodeArtifact Repository resource.
    See the ``__init__`` docstring for usage examples.
    """
    # NOTE: this class is generated by the Pulumi Terraform Bridge (tfgen);
    # change the generator inputs rather than editing this code by hand.
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 domain: Optional[pulumi.Input[str]] = None,
                 domain_owner: Optional[pulumi.Input[str]] = None,
                 repository: Optional[pulumi.Input[str]] = None,
                 upstreams: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RepositoryUpstreamArgs']]]]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Provides a CodeArtifact Repository Resource.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_aws as aws
        example_key = aws.kms.Key("exampleKey", description="domain key")
        example_domain = aws.codeartifact.Domain("exampleDomain",
            domain="example",
            encryption_key=example_key.arn)
        test = aws.codeartifact.Repository("test",
            repository="example",
            domain=example_domain.domain)
        ```
        ### With Upstream Repository
        ```python
        import pulumi
        import pulumi_aws as aws
        upstream = aws.codeartifact.Repository("upstream",
            repository="upstream",
            domain=aws_codeartifact_domain["test"]["domain"])
        test = aws.codeartifact.Repository("test",
            repository="example",
            domain=aws_codeartifact_domain["example"]["domain"],
            upstreams=[aws.codeartifact.RepositoryUpstreamArgs(
                repository_name=upstream.repository,
            )])
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: The description of the repository.
        :param pulumi.Input[str] domain: The domain that contains the created repository.
        :param pulumi.Input[str] domain_owner: The account number of the AWS account that owns the domain.
        :param pulumi.Input[str] repository: The name of the repository to create.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RepositoryUpstreamArgs']]]] upstreams: A list of upstream repositories to associate with the repository. The order of the upstream repositories in the list determines their priority order when AWS CodeArtifact looks for a requested package version. see Upstream
        """
        # Legacy __name__/__opts__ arguments: honored but deprecated in
        # favor of resource_name/opts.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means "adopt an existing resource"; __props__ are
        # only built here when creating a new one.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            __props__['description'] = description
            if domain is None:
                raise TypeError("Missing required property 'domain'")
            __props__['domain'] = domain
            __props__['domain_owner'] = domain_owner
            if repository is None:
                raise TypeError("Missing required property 'repository'")
            __props__['repository'] = repository
            __props__['upstreams'] = upstreams
            # Output-only properties start as None and are filled in by
            # the provider.
            __props__['administrator_account'] = None
            __props__['arn'] = None
            __props__['external_connections'] = None
        super(Repository, __self__).__init__(
            'aws:codeartifact/repository:Repository',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            administrator_account: Optional[pulumi.Input[str]] = None,
            arn: Optional[pulumi.Input[str]] = None,
            description: Optional[pulumi.Input[str]] = None,
            domain: Optional[pulumi.Input[str]] = None,
            domain_owner: Optional[pulumi.Input[str]] = None,
            external_connections: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RepositoryExternalConnectionArgs']]]]] = None,
            repository: Optional[pulumi.Input[str]] = None,
            upstreams: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RepositoryUpstreamArgs']]]]] = None) -> 'Repository':
        """
        Get an existing Repository resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] administrator_account: The account number of the AWS account that manages the repository.
        :param pulumi.Input[str] arn: The ARN of the repository.
        :param pulumi.Input[str] description: The description of the repository.
        :param pulumi.Input[str] domain: The domain that contains the created repository.
        :param pulumi.Input[str] domain_owner: The account number of the AWS account that owns the domain.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RepositoryExternalConnectionArgs']]]] external_connections: An array of external connections associated with the repository. see External Connections
        :param pulumi.Input[str] repository: The name of the repository to create.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RepositoryUpstreamArgs']]]] upstreams: A list of upstream repositories to associate with the repository. The order of the upstream repositories in the list determines their priority order when AWS CodeArtifact looks for a requested package version. see Upstream
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = dict()
        __props__["administrator_account"] = administrator_account
        __props__["arn"] = arn
        __props__["description"] = description
        __props__["domain"] = domain
        __props__["domain_owner"] = domain_owner
        __props__["external_connections"] = external_connections
        __props__["repository"] = repository
        __props__["upstreams"] = upstreams
        return Repository(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="administratorAccount")
    def administrator_account(self) -> pulumi.Output[str]:
        """
        The account number of the AWS account that manages the repository.
        """
        return pulumi.get(self, "administrator_account")
    @property
    @pulumi.getter
    def arn(self) -> pulumi.Output[str]:
        """
        The ARN of the repository.
        """
        return pulumi.get(self, "arn")
    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        The description of the repository.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter
    def domain(self) -> pulumi.Output[str]:
        """
        The domain that contains the created repository.
        """
        return pulumi.get(self, "domain")
    @property
    @pulumi.getter(name="domainOwner")
    def domain_owner(self) -> pulumi.Output[str]:
        """
        The account number of the AWS account that owns the domain.
        """
        return pulumi.get(self, "domain_owner")
    @property
    @pulumi.getter(name="externalConnections")
    def external_connections(self) -> pulumi.Output[Sequence['outputs.RepositoryExternalConnection']]:
        """
        An array of external connections associated with the repository. see External Connections
        """
        return pulumi.get(self, "external_connections")
    @property
    @pulumi.getter
    def repository(self) -> pulumi.Output[str]:
        """
        The name of the repository to create.
        """
        return pulumi.get(self, "repository")
    @property
    @pulumi.getter
    def upstreams(self) -> pulumi.Output[Optional[Sequence['outputs.RepositoryUpstream']]]:
        """
        A list of upstream repositories to associate with the repository. The order of the upstream repositories in the list determines their priority order when AWS CodeArtifact looks for a requested package version. see Upstream
        """
        return pulumi.get(self, "upstreams")
    # Map the provider's camelCase property names to Python snake_case
    # (and back) using the generated lookup tables.
    def translate_output_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
import yaml
import glob
import types
import bmeg.ioutils
from tabulate import tabulate
""" simple transform of dvc into md """
DEFAULT_DIRECTORY = 'meta'
HUGO_HEADER = """
---
title: Data Sources
sidebar: true
menu:
main:
parent: Building Graph
weight: 2
---
"""
def cardify(papers):
    """Render each paper record as a Hugo 'card' shortcode block.

    Parameters
    ----------
    papers : list of dict
        Each dict needs 'path', 'comment' and 'publications' (a
        comma-separated string of URLs). Sorted in place, shortest
        comment first.

    Returns
    -------
    str
        Newline-joined Hugo shortcode markup, one card per paper,
        wrapped in a card-container.
    """
    # BUG FIX: the previous sort key was (len(comment), item), which uses
    # the dict itself as a tie-breaker; dicts are unorderable in Python 3,
    # so any two papers with equal-length comments raised TypeError.
    # list.sort is stable, so ties now simply keep their input order.
    papers.sort(key=lambda item: len(item['comment']))
    lines = []
    lines.append('{{% card-container %}}')
    for paper in papers:
        # 'source/foo/*' -> 'foo'
        source = paper['path'].replace('source/', '').replace('/*', '')
        # Doubled braces in the format string emit literal {{% ... %}}.
        lines.append('{{{{% card title="{}" %}}}}'.format(source))
        lines.append(paper['comment'])
        for publication in paper['publications'].split(','):
            lines.append('[paper]({})'.format(publication))
        lines.append('{{% /card %}}')
    lines.append('{{% /card-container %}}')
    return '\n'.join(lines)
def transform(
    source_dir=".",
    papers_path='source/meta/bmeg-source-data-papers.tsv',
    emitter_directory=None
):
    """Render dvc stage files into a Hugo markdown page (dvc.md).

    Parameters
    ----------
    source_dir : str
        Directory searched for ``source*.dvc`` and ``output*.dvc`` files.
    papers_path : str
        TSV of papers; each row needs 'path', 'comment', 'publications'.
    emitter_directory : str or None
        Directory where ``dvc.md`` is written; defaults to 'outputs/meta'.
    """
    papers_reader = bmeg.ioutils.read_tsv(papers_path)
    papers = list(papers_reader)
    if not emitter_directory:
        emitter_directory = 'outputs/meta'
    with open('{}/dvc.md'.format(emitter_directory), 'w') as f:
        f.write(HUGO_HEADER)
        f.write('# Data Sources Summary\n')
        f.write(cardify(papers))
        f.write('\n')
        # Summarize the raw data sources (source*.dvc).
        path = '{}/source*.dvc'.format(source_dir)
        source_data = []
        for filename in glob.iglob(path, recursive=True):
            with open(filename, 'r') as stream:
                dvc = yaml.safe_load(stream)
                if 'cmd' not in dvc:
                    dvc['cmd'] = ''
                dvc = types.SimpleNamespace(**dvc)
                if hasattr(dvc, 'outs'):
                    for out in dvc.outs:
                        # 'cache' may be absent; pop() avoids a KeyError
                        # where the previous `del` would have raised.
                        out.pop('cache', None)
                        # Escape underscores for markdown tables.
                        out['path'] = out['path'].replace('_', '\\_')
                        source_data.append(out)
        f.write('# Data Sources Detail\n')
        f.write(tabulate(source_data, headers="keys", tablefmt="pipe"))
        f.write('\n')
        # Summarize the transformation stages (output*.dvc).
        path = '{}/output*.dvc'.format(source_dir)
        out_data = []
        for filename in glob.iglob(path, recursive=True):
            with open(filename, 'r') as stream:
                # BUG FIX: was yaml.load(stream) with no Loader, which is
                # deprecated and unsafe on untrusted input; use safe_load
                # for consistency with the source*.dvc loop above.
                dvc = yaml.safe_load(stream)
                if 'cmd' not in dvc:
                    dvc['cmd'] = ''
                dvc = types.SimpleNamespace(**dvc)
                if hasattr(dvc, 'outs'):
                    for out in dvc.outs:
                        out['path'] = out['path'].replace('_', '\\_')
                    out_data.append({'cmd': dvc.cmd.replace('_', '\\_').split(';')[0], 'output': '<br/>'.join([(out['path']) for out in dvc.outs])})
        f.write('# Transformation Detail\n')
        f.write(tabulate(out_data, headers="keys", tablefmt="pipe"))
        f.write('\n')
# Allow running this module as a script with default arguments.
if __name__ == '__main__':
    transform()
|
import theano.tensor as T
from .. import init
from .. import nonlinearities
from ..utils import as_tuple, int_types, inspect_kwargs
from ..theano_extensions import conv
from .base import Layer
__all__ = [
"Conv1DLayer",
"Conv2DLayer",
"Conv3DLayer",
"TransposedConv2DLayer",
"Deconv2DLayer",
"DilatedConv2DLayer",
"TransposedConv3DLayer",
"Deconv3DLayer",
]
def conv_output_length(input_length, filter_size, stride, pad=0):
    """Helper function to compute the output size of a convolution operation
    This function computes the length along a single axis, which corresponds
    to a 1D convolution. It can also be used for convolutions with higher
    dimensionalities by using it individually for each axis.
    Parameters
    ----------
    input_length : int or None
        The size of the input.
    filter_size : int
        The size of the filter.
    stride : int
        The stride of the convolution operation.
    pad : int, 'full', 'same' or 'valid' (default: 0)
        By default, the convolution is only computed where the input and the
        filter fully overlap (a valid convolution). When ``stride=1``, this
        yields an output that is smaller than the input by ``filter_size - 1``.
        The `pad` argument allows you to implicitly pad the input with zeros,
        extending the output size.
        A single integer results in symmetric zero-padding of the given size on
        both borders.
        ``'full'`` pads with one less than the filter size on both sides. This
        is equivalent to computing the convolution wherever the input and the
        filter overlap by at least one position.
        ``'same'`` pads with half the filter size on both sides (one less on
        the second side for an even filter size). When ``stride=1``, this
        results in an output size equal to the input size.
        ``'valid'`` is an alias for ``0`` (no padding / a valid convolution);
        it was previously accepted by the implementation but missing from
        this documentation.
    Returns
    -------
    int or None
        The output size corresponding to the given convolution parameters, or
        ``None`` if `input_size` is ``None``.
    Raises
    ------
    ValueError
        When an invalid padding is specified, a `ValueError` is raised.
    """
    if input_length is None:
        return None
    if pad == 'valid':
        output_length = input_length - filter_size + 1
    elif pad == 'full':
        output_length = input_length + filter_size - 1
    elif pad == 'same':
        output_length = input_length
    elif isinstance(pad, int_types):
        output_length = input_length + 2 * pad - filter_size + 1
    else:
        raise ValueError('Invalid pad: {0}'.format(pad))
    # This is the integer arithmetic equivalent to
    # np.ceil(output_length / stride)
    output_length = (output_length + stride - 1) // stride
    return output_length
def conv_input_length(output_length, filter_size, stride, pad=0):
    """Helper function to compute the input size of a convolution operation
    This function computes the length along a single axis, which corresponds
    to a 1D convolution. It can also be used for convolutions with higher
    dimensionalities by using it individually for each axis.
    Parameters
    ----------
    output_length : int or None
        The size of the output.
    filter_size : int
        The size of the filter.
    stride : int
        The stride of the convolution operation.
    pad : int, 'full', 'same' or 'valid' (default: 0)
        By default, the convolution is only computed where the input and the
        filter fully overlap (a valid convolution). When ``stride=1``, this
        yields an output that is smaller than the input by ``filter_size - 1``.
        The `pad` argument allows you to implicitly pad the input with zeros,
        extending the output size.
        A single integer results in symmetric zero-padding of the given size on
        both borders.
        ``'full'`` pads with one less than the filter size on both sides. This
        is equivalent to computing the convolution wherever the input and the
        filter overlap by at least one position.
        ``'same'`` pads with half the filter size on both sides (one less on
        the second side for an even filter size). When ``stride=1``, this
        results in an output size equal to the input size.
        ``'valid'`` is an alias for ``0`` (no padding / a valid convolution);
        it was previously accepted by the implementation but missing from
        this documentation.
    Returns
    -------
    int or None
        The smallest input size corresponding to the given convolution
        parameters for the given output size, or ``None`` if `output_size` is
        ``None``. For a strided convolution, any input size of up to
        ``stride - 1`` elements larger than returned will still give the same
        output size.
    Raises
    ------
    ValueError
        When an invalid padding is specified, a `ValueError` is raised.
    Notes
    -----
    This can be used to compute the output size of a convolution backward pass,
    also called transposed convolution, fractionally-strided convolution or
    (wrongly) deconvolution in the literature.
    """
    if output_length is None:
        return None
    # Normalize the string paddings to their integer equivalents.
    if pad == 'valid':
        pad = 0
    elif pad == 'full':
        pad = filter_size - 1
    elif pad == 'same':
        pad = filter_size // 2
    if not isinstance(pad, int_types):
        raise ValueError('Invalid pad: {0}'.format(pad))
    return (output_length - 1) * stride - 2 * pad + filter_size
class BaseConvLayer(Layer):
    """
    lasagne.layers.BaseConvLayer(incoming, num_filters, filter_size,
    stride=1, pad=0, untie_biases=False,
    W=lasagne.init.GlorotUniform(), b=lasagne.init.Constant(0.),
    nonlinearity=lasagne.nonlinearities.rectify, flip_filters=True,
    n=None, **kwargs)
    Convolutional layer base class
    Base class for performing an `n`-dimensional convolution on its input,
    optionally adding a bias and applying an elementwise nonlinearity. Note
    that this class cannot be used in a Lasagne network, only its subclasses
    can (e.g., :class:`Conv1DLayer`, :class:`Conv2DLayer`).
    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape. Must
        be a tensor of 2+`n` dimensions:
        ``(batch_size, num_input_channels, <n spatial dimensions>)``.
    num_filters : int
        The number of learnable convolutional filters this layer has.
    filter_size : int or iterable of int
        An integer or an `n`-element tuple specifying the size of the filters.
    stride : int or iterable of int
        An integer or an `n`-element tuple specifying the stride of the
        convolution operation.
    pad : int, iterable of int, 'full', 'same' or 'valid' (default: 0)
        By default, the convolution is only computed where the input and the
        filter fully overlap (a valid convolution). When ``stride=1``, this
        yields an output that is smaller than the input by ``filter_size - 1``.
        The `pad` argument allows you to implicitly pad the input with zeros,
        extending the output size.
        A single integer results in symmetric zero-padding of the given size on
        all borders, a tuple of `n` integers allows different symmetric padding
        per dimension.
        ``'full'`` pads with one less than the filter size on both sides. This
        is equivalent to computing the convolution wherever the input and the
        filter overlap by at least one position.
        ``'same'`` pads with half the filter size (rounded down) on both sides.
        When ``stride=1`` this results in an output size equal to the input
        size. Even filter size is not supported.
        ``'valid'`` is an alias for ``0`` (no padding / a valid convolution).
        Note that ``'full'`` and ``'same'`` can be faster than equivalent
        integer values due to optimizations by Theano.
    untie_biases : bool (default: False)
        If ``False``, the layer will have a bias parameter for each channel,
        which is shared across all positions in this channel. As a result, the
        `b` attribute will be a vector (1D).
        If ``True``, the layer will have separate bias parameters for each
        position in each channel. As a result, the `b` attribute will be an
        `n`-dimensional tensor.
    W : Theano shared variable, expression, numpy array or callable
        Initial value, expression or initializer for the weights.
        These should be a tensor of 2+`n` dimensions with shape
        ``(num_filters, num_input_channels, <n spatial dimensions>)``.
        See :func:`lasagne.utils.create_param` for more information.
    b : Theano shared variable, expression, numpy array, callable or ``None``
        Initial value, expression or initializer for the biases. If set to
        ``None``, the layer will have no biases. Otherwise, biases should be
        a 1D array with shape ``(num_filters,)`` if `untied_biases` is set to
        ``False``. If it is set to ``True``, its shape should be
        ``(num_filters, <n spatial dimensions>)`` instead.
        See :func:`lasagne.utils.create_param` for more information.
    nonlinearity : callable or None
        The nonlinearity that is applied to the layer activations. If None
        is provided, the layer will be linear.
    flip_filters : bool (default: True)
        Whether to flip the filters before sliding them over the input,
        performing a convolution (this is the default), or not to flip them and
        perform a correlation. Note that for some other convolutional layers in
        Lasagne, flipping incurs an overhead and is disabled by default --
        check the documentation when using learned weights from another layer.
    num_groups : int (default: 1)
        The number of groups to split the input channels and output channels
        into, such that data does not cross the group boundaries. Requires the
        number of channels to be divisible by the number of groups, and
        requires Theano 0.10 or later for more than one group.
    n : int or None
        The dimensionality of the convolution (i.e., the number of spatial
        dimensions of each feature map and each convolutional filter). If
        ``None``, will be inferred from the input shape.
    **kwargs
        Any additional keyword arguments are passed to the `Layer` superclass.
    Attributes
    ----------
    W : Theano shared variable or expression
        Variable or expression representing the filter weights.
    b : Theano shared variable or expression
        Variable or expression representing the biases.
    """
    # NOTE(review): the trainable/regularizable constructor arguments look
    # like a local extension to upstream Lasagne -- they control the tags
    # passed to add_param for W (and trainable for b).
    def __init__(self, incoming, num_filters, filter_size, stride=1, pad=0,
                 untie_biases=False,
                 W=init.GlorotUniform(), b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify, flip_filters=True,
                 trainable=True, regularizable=True,
                 num_groups=1, n=None, **kwargs):
        super(BaseConvLayer, self).__init__(incoming, **kwargs)
        # A None nonlinearity means a purely linear layer.
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity
        # Infer (or validate) the number of spatial dimensions from the
        # input shape: (batch, channels, <n spatial dims>).
        if n is None:
            n = len(self.input_shape) - 2
        elif n != len(self.input_shape) - 2:
            raise ValueError("Tried to create a %dD convolution layer with "
                             "input shape %r. Expected %d input dimensions "
                             "(batchsize, channels, %d spatial dimensions)." %
                             (n, self.input_shape, n+2, n))
        self.n = n
        self.num_filters = num_filters
        self.filter_size = as_tuple(filter_size, n, int_types)
        self.flip_filters = flip_filters
        self.stride = as_tuple(stride, n, int_types)
        self.untie_biases = untie_biases
        # 'same' padding only yields identical output size for odd filters.
        if pad == 'same':
            if any(s % 2 == 0 for s in self.filter_size):
                raise NotImplementedError(
                    '`same` padding requires odd filter size.')
        # Normalize pad: 'valid' -> zeros tuple, strings kept as-is for
        # Theano, integers expanded to an n-tuple.
        if pad == 'valid':
            self.pad = as_tuple(0, n)
        elif pad in ('full', 'same'):
            self.pad = pad
        else:
            self.pad = as_tuple(pad, n, int_types)
        # Grouped convolution requires channel counts divisible by the
        # group count, and Theano support for the num_groups argument.
        if (num_groups <= 0 or
                self.num_filters % num_groups != 0 or
                self.input_shape[1] % num_groups != 0):
            raise ValueError(
                "num_groups (here: %d) must be positive and evenly divide the "
                "number of input and output channels (here: %d and %d)" %
                (num_groups, self.input_shape[1], self.num_filters))
        elif (num_groups > 1 and
              "num_groups" not in inspect_kwargs(T.nnet.conv2d)):
            raise RuntimeError("num_groups > 1 requires "
                               "Theano 0.10 or later")  # pragma: no cover
        self.num_groups = num_groups
        self.W = self.add_param(W, self.get_W_shape(), name="W",
                                trainable=trainable, regularizable=regularizable)
        if b is None:
            self.b = None
        else:
            # Untied biases carry one value per channel AND position.
            if self.untie_biases:
                biases_shape = (num_filters,) + self.output_shape[2:]
            else:
                biases_shape = (num_filters,)
            self.b = self.add_param(b, biases_shape, name="b",
                                    trainable=trainable,
                                    regularizable=False)
    def get_W_shape(self):
        """Get the shape of the weight matrix `W`.
        Returns
        -------
        tuple of int
            The shape of the weight matrix.
        """
        # With grouped convolution, each filter only sees its group's slice
        # of the input channels.
        num_input_channels = self.input_shape[1] // self.num_groups
        return (self.num_filters, num_input_channels) + self.filter_size
    def get_output_shape_for(self, input_shape):
        """Compute the output shape: batch and filter count up front, then
        one conv_output_length per spatial dimension."""
        # String pads apply equally to every spatial dimension.
        pad = self.pad if isinstance(self.pad, tuple) else (self.pad,) * self.n
        batchsize = input_shape[0]
        return ((batchsize, self.num_filters) +
                tuple(conv_output_length(input, filter, stride, p)
                      for input, filter, stride, p
                      in zip(input_shape[2:], self.filter_size,
                             self.stride, pad)))
    def get_output_for(self, input, **kwargs):
        """Convolve the input, add the bias (if any) and apply the
        nonlinearity."""
        conved = self.convolve(input, **kwargs)
        if self.b is None:
            activation = conved
        elif self.untie_biases:
            # Untied biases broadcast only over the batch dimension.
            activation = conved + T.shape_padleft(self.b, 1)
        else:
            # Tied biases broadcast over batch and all spatial dimensions.
            activation = conved + self.b.dimshuffle(('x', 0) + ('x',) * self.n)
        return self.nonlinearity(activation)
    def convolve(self, input, **kwargs):
        """
        Symbolically convolves `input` with ``self.W``, producing an output of
        shape ``self.output_shape``. To be implemented by subclasses.
        Parameters
        ----------
        input : Theano tensor
            The input minibatch to convolve
        **kwargs
            Any additional keyword arguments from :meth:`get_output_for`
        Returns
        -------
        Theano tensor
            `input` convolved according to the configuration of this layer,
            without any bias or nonlinearity applied.
        """
        raise NotImplementedError("BaseConvLayer does not implement the "
                                  "convolve() method. You will want to "
                                  "use a subclass such as Conv2DLayer.")
class Conv1DLayer(BaseConvLayer):
    """
    lasagne.layers.Conv1DLayer(incoming, num_filters, filter_size, stride=1,
    pad=0, untie_biases=False, W=lasagne.init.GlorotUniform(),
    b=lasagne.init.Constant(0.), nonlinearity=lasagne.nonlinearities.rectify,
    flip_filters=True, convolution=lasagne.theano_extensions.conv.conv1d_mc0,
    **kwargs)
    1D convolutional layer
    Performs a 1D convolution on its input and optionally adds a bias and
    applies an elementwise nonlinearity.
    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape. The
        output of this layer should be a 3D tensor, with shape
        ``(batch_size, num_input_channels, input_length)``.
    num_filters : int
        The number of learnable convolutional filters this layer has.
    filter_size : int or iterable of int
        An integer or a 1-element tuple specifying the size of the filters.
    stride : int or iterable of int
        An integer or a 1-element tuple specifying the stride of the
        convolution operation.
    pad : int, iterable of int, 'full', 'same' or 'valid' (default: 0)
        Implicit zero padding of the input. An integer or a 1-element tuple
        gives symmetric zero-padding of that size on both borders.
        ``'full'`` pads with one less than the filter size on both sides,
        ``'same'`` pads with half the filter size (rounded down; even filter
        size unsupported), and ``'valid'`` is an alias for ``0``.
        See :class:`BaseConvLayer` for details.
    untie_biases : bool (default: False)
        If ``False``, one bias per channel shared across positions (`b` is
        1D). If ``True``, separate biases per channel and position (`b` is
        a 2D matrix).
    W : Theano shared variable, expression, numpy array or callable
        Initial value, expression or initializer for the weights. These
        should be a 3D tensor with shape
        ``(num_filters, num_input_channels, filter_length)``.
        See :func:`lasagne.utils.create_param` for more information.
    b : Theano shared variable, expression, numpy array, callable or ``None``
        Initial value, expression or initializer for the biases; ``None``
        for no biases. Shape ``(num_filters,)``, or
        ``(num_filters, input_length)`` when `untie_biases` is ``True``.
        See :func:`lasagne.utils.create_param` for more information.
    nonlinearity : callable or None
        The nonlinearity that is applied to the layer activations. If None
        is provided, the layer will be linear.
    flip_filters : bool (default: True)
        Whether to flip the filters before sliding them over the input
        (a convolution), or not (a correlation).
    num_groups : int (default: 1)
        The number of groups to split the input and output channels into;
        data does not cross group boundaries. Requires divisible channel
        counts, and Theano 0.10+ for more than one group.
    convolution : callable
        The convolution implementation to use. The
        `lasagne.theano_extensions.conv` module provides some alternative
        implementations for 1D convolutions, because the Theano API only
        features a 2D convolution implementation. Usually it should be fine
        to leave this at the default value. Note that not all implementations
        support all settings for `pad`, `subsample` and `num_groups`.
    trainable : bool (default: True)
        Whether `W` and `b` are tagged as trainable parameters. Exposed
        here for consistency with :class:`Conv2DLayer`.
    regularizable : bool (default: True)
        Whether `W` is tagged as regularizable.
    **kwargs
        Any additional keyword arguments are passed to the `Layer` superclass.
    Attributes
    ----------
    W : Theano shared variable or expression
        Variable or expression representing the filter weights.
    b : Theano shared variable or expression
        Variable or expression representing the biases.
    """
    def __init__(self, incoming, num_filters, filter_size, stride=1,
                 pad=0, untie_biases=False,
                 W=init.GlorotUniform(), b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify, flip_filters=True,
                 convolution=conv.conv1d_mc0,
                 trainable=True, regularizable=True, **kwargs):
        # Consistency fix: accept and forward trainable/regularizable
        # explicitly, exactly as Conv2DLayer does (previously they only
        # reached BaseConvLayer implicitly through **kwargs). They are
        # placed after `convolution` so positional callers are unaffected.
        super(Conv1DLayer, self).__init__(incoming, num_filters, filter_size,
                                          stride, pad, untie_biases, W, b,
                                          nonlinearity, flip_filters, n=1,
                                          trainable=trainable,
                                          regularizable=regularizable,
                                          **kwargs)
        self.convolution = convolution
    def convolve(self, input, **kwargs):
        # Theano's conv implementations spell 'same' padding as 'half'.
        border_mode = 'half' if self.pad == 'same' else self.pad
        extra_kwargs = {}
        # Only pass num_groups when needed, so older implementations that
        # lack the argument keep working.
        if self.num_groups > 1:  # pragma: no cover
            extra_kwargs['num_groups'] = self.num_groups
        conved = self.convolution(input, self.W,
                                  self.input_shape, self.get_W_shape(),
                                  subsample=self.stride,
                                  border_mode=border_mode,
                                  filter_flip=self.flip_filters,
                                  **extra_kwargs)
        return conved
class Conv2DLayer(BaseConvLayer):
    """
    lasagne.layers.Conv2DLayer(incoming, num_filters, filter_size,
    stride=(1, 1), pad=0, untie_biases=False,
    W=lasagne.init.GlorotUniform(), b=lasagne.init.Constant(0.),
    nonlinearity=lasagne.nonlinearities.rectify, flip_filters=True,
    convolution=theano.tensor.nnet.conv2d, **kwargs)
    2D convolutional layer
    Performs a 2D convolution on its input and optionally adds a bias and
    applies an elementwise nonlinearity.
    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape. The
        output of this layer should be a 4D tensor, with shape
        ``(batch_size, num_input_channels, input_rows, input_columns)``.
    num_filters : int
        The number of learnable convolutional filters this layer has.
    filter_size : int or iterable of int
        An integer or a 2-element tuple specifying the size of the filters.
    stride : int or iterable of int
        An integer or a 2-element tuple specifying the stride of the
        convolution operation.
    pad : int, iterable of int, 'full', 'same' or 'valid' (default: 0)
        By default, the convolution is only computed where the input and the
        filter fully overlap (a valid convolution). When ``stride=1``, this
        yields an output that is smaller than the input by ``filter_size - 1``.
        The `pad` argument allows you to implicitly pad the input with zeros,
        extending the output size.
        A single integer results in symmetric zero-padding of the given size on
        all borders, a tuple of two integers allows different symmetric padding
        per dimension.
        ``'full'`` pads with one less than the filter size on both sides. This
        is equivalent to computing the convolution wherever the input and the
        filter overlap by at least one position.
        ``'same'`` pads with half the filter size (rounded down) on both sides.
        When ``stride=1`` this results in an output size equal to the input
        size. Even filter size is not supported.
        ``'valid'`` is an alias for ``0`` (no padding / a valid convolution).
        Note that ``'full'`` and ``'same'`` can be faster than equivalent
        integer values due to optimizations by Theano.
    untie_biases : bool (default: False)
        If ``False``, the layer will have a bias parameter for each channel,
        which is shared across all positions in this channel. As a result, the
        `b` attribute will be a vector (1D).
        If True, the layer will have separate bias parameters for each
        position in each channel. As a result, the `b` attribute will be a
        3D tensor.
    W : Theano shared variable, expression, numpy array or callable
        Initial value, expression or initializer for the weights.
        These should be a 4D tensor with shape
        ``(num_filters, num_input_channels, filter_rows, filter_columns)``.
        See :func:`lasagne.utils.create_param` for more information.
    b : Theano shared variable, expression, numpy array, callable or ``None``
        Initial value, expression or initializer for the biases. If set to
        ``None``, the layer will have no biases. Otherwise, biases should be
        a 1D array with shape ``(num_filters,)`` if `untied_biases` is set to
        ``False``. If it is set to ``True``, its shape should be
        ``(num_filters, output_rows, output_columns)`` instead.
        See :func:`lasagne.utils.create_param` for more information.
    nonlinearity : callable or None
        The nonlinearity that is applied to the layer activations. If None
        is provided, the layer will be linear.
    flip_filters : bool (default: True)
        Whether to flip the filters before sliding them over the input,
        performing a convolution (this is the default), or not to flip them and
        perform a correlation. Note that for some other convolutional layers in
        Lasagne, flipping incurs an overhead and is disabled by default --
        check the documentation when using learned weights from another layer.
    num_groups : int (default: 1)
        The number of groups to split the input channels and output channels
        into, such that data does not cross the group boundaries. Requires the
        number of channels to be divisible by the number of groups, and
        requires Theano 0.10 or later for more than one group.
    convolution : callable
        The convolution implementation to use. Usually it should be fine to
        leave this at the default value.
    **kwargs
        Any additional keyword arguments are passed to the `Layer` superclass.
    Attributes
    ----------
    W : Theano shared variable or expression
        Variable or expression representing the filter weights.
    b : Theano shared variable or expression
        Variable or expression representing the biases.
    """
    # NOTE(review): trainable/regularizable appear to be a local extension
    # relative to upstream Lasagne; they are forwarded to BaseConvLayer's
    # add_param tagging -- confirm against the project's Layer.add_param.
    def __init__(self, incoming, num_filters, filter_size, stride=(1, 1),
                 pad=0, untie_biases=False,
                 W=init.GlorotUniform(), b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify, flip_filters=True,
                 convolution=T.nnet.conv2d,
                 trainable=True, regularizable=True,
                 **kwargs):
        super(Conv2DLayer, self).__init__(incoming, num_filters, filter_size,
                                          stride, pad, untie_biases, W, b,
                                          nonlinearity, flip_filters, n=2,
                                          trainable=trainable, regularizable=regularizable,
                                          **kwargs)
        self.convolution = convolution
    def convolve(self, input, **kwargs):
        # Theano's conv2d spells 'same' padding as border_mode='half'.
        border_mode = 'half' if self.pad == 'same' else self.pad
        extra_kwargs = {}
        # Only pass num_groups when needed, so older Theano versions that
        # lack the argument keep working.
        if self.num_groups > 1:  # pragma: no cover
            extra_kwargs['num_groups'] = self.num_groups
        conved = self.convolution(input, self.W,
                                  self.input_shape, self.get_W_shape(),
                                  subsample=self.stride,
                                  border_mode=border_mode,
                                  filter_flip=self.flip_filters,
                                  **extra_kwargs)
        return conved
class Conv3DLayer(BaseConvLayer):  # pragma: no cover
    """
    lasagne.layers.Conv3DLayer(incoming, num_filters, filter_size,
    stride=(1, 1, 1), pad=0, untie_biases=False,
    W=lasagne.init.GlorotUniform(), b=lasagne.init.Constant(0.),
    nonlinearity=lasagne.nonlinearities.rectify, flip_filters=True,
    convolution=theano.tensor.nnet.conv3d, **kwargs)

    3D convolutional layer

    Performs a 3D convolution on its input and optionally adds a bias and
    applies an elementwise nonlinearity.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape. The
        output of this layer should be a 5D tensor, with shape ``(batch_size,
        num_input_channels, input_depth, input_rows, input_columns)``.
    num_filters : int
        The number of learnable convolutional filters this layer has.
    filter_size : int or iterable of int
        An integer or a 3-element tuple specifying the size of the filters.
    stride : int or iterable of int
        An integer or a 3-element tuple specifying the stride of the
        convolution operation.
    pad : int, iterable of int, 'full', 'same' or 'valid' (default: 0)
        By default, the convolution is only computed where the input and the
        filter fully overlap (a valid convolution). When ``stride=1``, this
        yields an output that is smaller than the input by ``filter_size - 1``.
        The `pad` argument allows you to implicitly pad the input with zeros,
        extending the output size.
        A single integer results in symmetric zero-padding of the given size on
        all borders, a tuple of three integers allows different symmetric
        padding per dimension.
        ``'full'`` pads with one less than the filter size on both sides. This
        is equivalent to computing the convolution wherever the input and the
        filter overlap by at least one position.
        ``'same'`` pads with half the filter size (rounded down) on both sides.
        When ``stride=1`` this results in an output size equal to the input
        size. Even filter size is not supported.
        ``'valid'`` is an alias for ``0`` (no padding / a valid convolution).
        Note that ``'full'`` and ``'same'`` can be faster than equivalent
        integer values due to optimizations by Theano.
    untie_biases : bool (default: False)
        If ``False``, the layer will have a bias parameter for each channel,
        which is shared across all positions in this channel. As a result, the
        `b` attribute will be a vector (1D).
        If True, the layer will have separate bias parameters for each
        position in each channel. As a result, the `b` attribute will be a
        4D tensor.
    W : Theano shared variable, expression, numpy array or callable
        Initial value, expression or initializer for the weights.
        These should be a 5D tensor with shape ``(num_filters,
        num_input_channels, filter_depth, filter_rows, filter_columns)``.
        See :func:`lasagne.utils.create_param` for more information.
    b : Theano shared variable, expression, numpy array, callable or ``None``
        Initial value, expression or initializer for the biases. If set to
        ``None``, the layer will have no biases. Otherwise, biases should be
        a 1D array with shape ``(num_filters,)`` if `untied_biases` is set to
        ``False``. If it is set to ``True``, its shape should be
        ``(num_filters, output_depth, output_rows, output_columns)`` instead.
        See :func:`lasagne.utils.create_param` for more information.
    nonlinearity : callable or None
        The nonlinearity that is applied to the layer activations. If None
        is provided, the layer will be linear.
    flip_filters : bool (default: True)
        Whether to flip the filters before sliding them over the input,
        performing a convolution (this is the default), or not to flip them and
        perform a correlation. Note that for some other convolutional layers in
        Lasagne, flipping incurs an overhead and is disabled by default --
        check the documentation when using learned weights from another layer.
    num_groups : int (default: 1)
        The number of groups to split the input channels and output channels
        into, such that data does not cross the group boundaries. Requires the
        number of channels to be divisible by the number of groups, and
        requires Theano 0.10 or later for more than one group.
    convolution : callable
        The convolution implementation to use. Usually it should be fine to
        leave this at the default value.
    **kwargs
        Any additional keyword arguments are passed to the `Layer` superclass.

    Attributes
    ----------
    W : Theano shared variable or expression
        Variable or expression representing the filter weights.
    b : Theano shared variable or expression
        Variable or expression representing the biases.
    """
    def __init__(self, incoming, num_filters, filter_size, stride=(1, 1, 1),
                 pad=0, untie_biases=False,
                 W=init.GlorotUniform(), b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify, flip_filters=True,
                 convolution=None, **kwargs):
        super(Conv3DLayer, self).__init__(incoming, num_filters, filter_size,
                                          stride, pad, untie_biases, W, b,
                                          nonlinearity, flip_filters, n=3,
                                          **kwargs)
        # The default is resolved lazily so this def line does not require
        # T.nnet.conv3d, which only exists in newer Theano versions (the
        # class itself is deleted below for installations that lack it).
        if convolution is None:
            convolution = T.nnet.conv3d
        self.convolution = convolution
    def convolve(self, input, **kwargs):
        # Theano spells 'same' padding as the 'half' border mode.
        border_mode = 'half' if self.pad == 'same' else self.pad
        extra_kwargs = {}
        # Only pass num_groups when grouping is requested, so older Theano
        # versions without grouped-convolution support keep working.
        if self.num_groups > 1:  # pragma: no cover
            extra_kwargs['num_groups'] = self.num_groups
        conved = self.convolution(input, self.W,
                                  self.input_shape, self.get_W_shape(),
                                  subsample=self.stride,
                                  border_mode=border_mode,
                                  filter_flip=self.flip_filters,
                                  **extra_kwargs)
        return conved
# NOTE(review): conv3d is only present in newer Theano releases; on older
# installations the class must not appear in the public API at all.
if not hasattr(T.nnet, 'conv3d'):  # pragma: no cover
    # Hide Conv3DLayer for old Theano versions
    del Conv3DLayer
    __all__.remove('Conv3DLayer')
class TransposedConv2DLayer(BaseConvLayer):
    """
    lasagne.layers.TransposedConv2DLayer(incoming, num_filters, filter_size,
    stride=(1, 1), crop=0, untie_biases=False,
    W=lasagne.init.GlorotUniform(), b=lasagne.init.Constant(0.),
    nonlinearity=lasagne.nonlinearities.rectify, flip_filters=False, **kwargs)

    2D transposed convolution layer

    Performs the backward pass of a 2D convolution (also called transposed
    convolution, fractionally-strided convolution or deconvolution in the
    literature) on its input and optionally adds a bias and applies an
    elementwise nonlinearity.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape. The
        output of this layer should be a 4D tensor, with shape
        ``(batch_size, num_input_channels, input_rows, input_columns)``.
    num_filters : int
        The number of learnable convolutional filters this layer has.
    filter_size : int or iterable of int
        An integer or a 2-element tuple specifying the size of the filters.
    stride : int or iterable of int
        An integer or a 2-element tuple specifying the stride of the
        transposed convolution operation. For the transposed convolution, this
        gives the dilation factor for the input -- increasing it increases the
        output size.
    crop : int, iterable of int, 'full', 'same' or 'valid' (default: 0)
        By default, the transposed convolution is computed where the input and
        the filter overlap by at least one position (a full convolution). When
        ``stride=1``, this yields an output that is larger than the input by
        ``filter_size - 1``. It can be thought of as a valid convolution padded
        with zeros. The `crop` argument allows you to decrease the amount of
        this zero-padding, reducing the output size. It is the counterpart to
        the `pad` argument in a non-transposed convolution.
        A single integer results in symmetric cropping of the given size on all
        borders, a tuple of two integers allows different symmetric cropping
        per dimension.
        ``'full'`` disables zero-padding. It is is equivalent to computing the
        convolution wherever the input and the filter fully overlap.
        ``'same'`` pads with half the filter size (rounded down) on both sides.
        When ``stride=1`` this results in an output size equal to the input
        size. Even filter size is not supported.
        ``'valid'`` is an alias for ``0`` (no cropping / a full convolution).
        Note that ``'full'`` and ``'same'`` can be faster than equivalent
        integer values due to optimizations by Theano.
    untie_biases : bool (default: False)
        If ``False``, the layer will have a bias parameter for each channel,
        which is shared across all positions in this channel. As a result, the
        `b` attribute will be a vector (1D).
        If True, the layer will have separate bias parameters for each
        position in each channel. As a result, the `b` attribute will be a
        3D tensor.
    W : Theano shared variable, expression, numpy array or callable
        Initial value, expression or initializer for the weights.
        These should be a 4D tensor with shape
        ``(num_input_channels, num_filters, filter_rows, filter_columns)``.
        Note that the first two dimensions are swapped compared to a
        non-transposed convolution.
        See :func:`lasagne.utils.create_param` for more information.
    b : Theano shared variable, expression, numpy array, callable or ``None``
        Initial value, expression or initializer for the biases. If set to
        ``None``, the layer will have no biases. Otherwise, biases should be
        a 1D array with shape ``(num_filters,)`` if `untied_biases` is set to
        ``False``. If it is set to ``True``, its shape should be
        ``(num_filters, output_rows, output_columns)`` instead.
        See :func:`lasagne.utils.create_param` for more information.
    nonlinearity : callable or None
        The nonlinearity that is applied to the layer activations. If None
        is provided, the layer will be linear.
    flip_filters : bool (default: False)
        Whether to flip the filters before sliding them over the input,
        performing a convolution, or not to flip them and perform a
        correlation (this is the default). Note that this flag is inverted
        compared to a non-transposed convolution.
    output_size : int or iterable of int or symbolic tuple of ints
        The output size of the transposed convolution. Allows to specify
        which of the possible output shapes to return when stride > 1.
        If not specified, the smallest shape will be returned.
    **kwargs
        Any additional keyword arguments are passed to the `Layer` superclass.

    Attributes
    ----------
    W : Theano shared variable or expression
        Variable or expression representing the filter weights.
    b : Theano shared variable or expression
        Variable or expression representing the biases.

    Notes
    -----
    The transposed convolution is implemented as the backward pass of a
    corresponding non-transposed convolution. It can be thought of as dilating
    the input (by adding ``stride - 1`` zeros between adjacent input elements),
    padding it with ``filter_size - 1 - crop`` zeros, and cross-correlating it
    with the filters. See [1]_ for more background.

    Examples
    --------
    To transpose an existing convolution, with tied filter weights:

    >>> from lasagne.layers import Conv2DLayer, TransposedConv2DLayer
    >>> conv = Conv2DLayer((None, 1, 32, 32), 16, 3, stride=2, pad=2)
    >>> deconv = TransposedConv2DLayer(conv, conv.input_shape[1],
    ...         conv.filter_size, stride=conv.stride, crop=conv.pad,
    ...         W=conv.W, flip_filters=not conv.flip_filters)

    References
    ----------
    .. [1] Vincent Dumoulin, Francesco Visin (2016):
           A guide to convolution arithmetic for deep learning. arXiv.
           http://arxiv.org/abs/1603.07285,
           https://github.com/vdumoulin/conv_arithmetic
    """
    def __init__(self, incoming, num_filters, filter_size, stride=(1, 1),
                 crop=0, untie_biases=False,
                 W=init.GlorotUniform(), b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify, flip_filters=False,
                 output_size=None, **kwargs):
        # output_size must be set before calling the super constructor
        # (the base class computes the output shape during construction).
        # A concrete output_size is normalized to a 2-tuple of ints; a
        # symbolic Theano variable is stored as-is.
        if (not isinstance(output_size, T.Variable) and
                output_size is not None):
            output_size = as_tuple(output_size, 2, int_types)
        self.output_size = output_size
        super(TransposedConv2DLayer, self).__init__(
            incoming, num_filters, filter_size, stride, crop, untie_biases,
            W, b, nonlinearity, flip_filters, n=2, **kwargs)
        # rename self.pad to self.crop:
        self.crop = self.pad
        del self.pad
    def get_W_shape(self):
        num_input_channels = self.input_shape[1]
        # first two sizes are swapped compared to a forward convolution
        return (num_input_channels, self.num_filters) + self.filter_size
    def get_output_shape_for(self, input_shape):
        if self.output_size is not None:
            size = self.output_size
            # A symbolic output_size carries no static shape information.
            if isinstance(self.output_size, T.Variable):
                size = (None, None)
            return input_shape[0], self.num_filters, size[0], size[1]
        # If self.output_size is not specified, return the smallest shape
        # when called from the constructor, self.crop is still called self.pad:
        crop = getattr(self, 'crop', getattr(self, 'pad', None))
        crop = crop if isinstance(crop, tuple) else (crop,) * self.n
        batchsize = input_shape[0]
        return ((batchsize, self.num_filters) +
                tuple(conv_input_length(input, filter, stride, p)
                      for input, filter, stride, p
                      in zip(input_shape[2:], self.filter_size,
                             self.stride, crop)))
    def convolve(self, input, **kwargs):
        # Theano spells 'same' cropping as the 'half' border mode.
        border_mode = 'half' if self.crop == 'same' else self.crop
        # The transposed convolution is the gradient of a forward convolution
        # w.r.t. its inputs, so it is expressed through the grad-inputs op.
        op = T.nnet.abstract_conv.AbstractConv2d_gradInputs(
            imshp=self.output_shape,
            kshp=self.get_W_shape(),
            subsample=self.stride, border_mode=border_mode,
            filter_flip=not self.flip_filters)
        output_size = self.output_shape[2:]
        if isinstance(self.output_size, T.Variable):
            output_size = self.output_size
        elif any(s is None for s in output_size):
            # Static shape unknown: fall back to the symbolic input shape.
            output_size = self.get_output_shape_for(input.shape)[2:]
        conved = op(self.W, input, output_size)
        return conved
# "Deconvolution" alias, matching the alternative name used in the literature
# (see the TransposedConv2DLayer docstring).
Deconv2DLayer = TransposedConv2DLayer
class TransposedConv3DLayer(BaseConvLayer):  # pragma: no cover
    """
    lasagne.layers.TransposedConv3DLayer(incoming, num_filters, filter_size,
    stride=(1, 1, 1), crop=0, untie_biases=False,
    W=lasagne.init.GlorotUniform(), b=lasagne.init.Constant(0.),
    nonlinearity=lasagne.nonlinearities.rectify, flip_filters=False, **kwargs)

    3D transposed convolution layer

    Performs the backward pass of a 3D convolution (also called transposed
    convolution, fractionally-strided convolution or deconvolution in the
    literature) on its input and optionally adds a bias and applies an
    elementwise nonlinearity.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape. The
        output of this layer should be a 5D tensor, with shape
        ``(batch_size, num_input_channels, input_depth, input_rows,
        input_columns)``.
    num_filters : int
        The number of learnable convolutional filters this layer has.
    filter_size : int or iterable of int
        An integer or a 3-element tuple specifying the size of the filters.
    stride : int or iterable of int
        An integer or a 3-element tuple specifying the stride of the
        transposed convolution operation. For the transposed convolution, this
        gives the dilation factor for the input -- increasing it increases the
        output size.
    crop : int, iterable of int, 'full', 'same' or 'valid' (default: 0)
        By default, the transposed convolution is computed where the input and
        the filter overlap by at least one position (a full convolution). When
        ``stride=1``, this yields an output that is larger than the input by
        ``filter_size - 1``. It can be thought of as a valid convolution padded
        with zeros. The `crop` argument allows you to decrease the amount of
        this zero-padding, reducing the output size. It is the counterpart to
        the `pad` argument in a non-transposed convolution.
        A single integer results in symmetric cropping of the given size on all
        borders, a tuple of three integers allows different symmetric cropping
        per dimension.
        ``'full'`` disables zero-padding. It is is equivalent to computing the
        convolution wherever the input and the filter fully overlap.
        ``'same'`` pads with half the filter size (rounded down) on both sides.
        When ``stride=1`` this results in an output size equal to the input
        size. Even filter size is not supported.
        ``'valid'`` is an alias for ``0`` (no cropping / a full convolution).
        Note that ``'full'`` and ``'same'`` can be faster than equivalent
        integer values due to optimizations by Theano.
    untie_biases : bool (default: False)
        If ``False``, the layer will have a bias parameter for each channel,
        which is shared across all positions in this channel. As a result, the
        `b` attribute will be a vector (1D).
        If True, the layer will have separate bias parameters for each
        position in each channel. As a result, the `b` attribute will be a
        4D tensor.
    W : Theano shared variable, expression, numpy array or callable
        Initial value, expression or initializer for the weights.
        These should be a 5D tensor with shape
        ``(num_input_channels, num_filters, filter_depth, filter_rows,
        filter_columns)``.
        Note that the first two dimensions are swapped compared to a
        non-transposed convolution.
        See :func:`lasagne.utils.create_param` for more information.
    b : Theano shared variable, expression, numpy array, callable or ``None``
        Initial value, expression or initializer for the biases. If set to
        ``None``, the layer will have no biases. Otherwise, biases should be
        a 1D array with shape ``(num_filters,)`` if `untied_biases` is set to
        ``False``. If it is set to ``True``, its shape should be
        ``(num_filters, output_depth, output_rows, output_columns)`` instead.
        See :func:`lasagne.utils.create_param` for more information.
    nonlinearity : callable or None
        The nonlinearity that is applied to the layer activations. If None
        is provided, the layer will be linear.
    flip_filters : bool (default: False)
        Whether to flip the filters before sliding them over the input,
        performing a convolution, or not to flip them and perform a
        correlation (this is the default). Note that this flag is inverted
        compared to a non-transposed convolution.
    output_size : int or iterable of int or symbolic tuple of ints
        The output size of the transposed convolution. Allows to specify
        which of the possible output shapes to return when stride > 1.
        If not specified, the smallest shape will be returned.
    **kwargs
        Any additional keyword arguments are passed to the `Layer` superclass.

    Attributes
    ----------
    W : Theano shared variable or expression
        Variable or expression representing the filter weights.
    b : Theano shared variable or expression
        Variable or expression representing the biases.

    Notes
    -----
    The transposed convolution is implemented as the backward pass of a
    corresponding non-transposed convolution. It can be thought of as dilating
    the input (by adding ``stride - 1`` zeros between adjacent input elements),
    padding it with ``filter_size - 1 - crop`` zeros, and cross-correlating it
    with the filters. See [1]_ for more background.

    Examples
    --------
    To transpose an existing convolution, with tied filter weights:

    >>> from lasagne.layers import Conv3DLayer, TransposedConv3DLayer
    >>> conv = Conv3DLayer((None, 1, 32, 32, 32), 16, 3, stride=2, pad=2)
    >>> deconv = TransposedConv3DLayer(conv, conv.input_shape[1],
    ...         conv.filter_size, stride=conv.stride, crop=conv.pad,
    ...         W=conv.W, flip_filters=not conv.flip_filters)

    References
    ----------
    .. [1] Vincent Dumoulin, Francesco Visin (2016):
           A guide to convolution arithmetic for deep learning. arXiv.
           http://arxiv.org/abs/1603.07285,
           https://github.com/vdumoulin/conv_arithmetic
    """
    def __init__(self, incoming, num_filters, filter_size,
                 stride=(1, 1, 1), crop=0, untie_biases=False,
                 W=init.GlorotUniform(), b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify, flip_filters=False,
                 output_size=None, **kwargs):
        # output_size must be set before calling the super constructor
        # (the base class computes the output shape during construction).
        # A concrete output_size is normalized to a 3-tuple of ints; a
        # symbolic Theano variable is stored as-is.
        if (not isinstance(output_size, T.Variable) and
                output_size is not None):
            output_size = as_tuple(output_size, 3, int_types)
        self.output_size = output_size
        BaseConvLayer.__init__(self, incoming, num_filters, filter_size,
                               stride, crop, untie_biases, W, b,
                               nonlinearity, flip_filters, n=3, **kwargs)
        # rename self.pad to self.crop:
        self.crop = self.pad
        del self.pad
    def get_W_shape(self):
        num_input_channels = self.input_shape[1]
        # first two sizes are swapped compared to a forward convolution
        return (num_input_channels, self.num_filters) + self.filter_size
    def get_output_shape_for(self, input_shape):
        if self.output_size is not None:
            size = self.output_size
            # A symbolic output_size carries no static shape information.
            if isinstance(self.output_size, T.Variable):
                size = (None, None, None)
            return input_shape[0], self.num_filters, size[0], size[1], size[2]
        # If self.output_size is not specified, return the smallest shape
        # when called from the constructor, self.crop is still called self.pad:
        crop = getattr(self, 'crop', getattr(self, 'pad', None))
        crop = crop if isinstance(crop, tuple) else (crop,) * self.n
        batchsize = input_shape[0]
        return ((batchsize, self.num_filters) +
                tuple(conv_input_length(input, filter, stride, p)
                      for input, filter, stride, p
                      in zip(input_shape[2:], self.filter_size,
                             self.stride, crop)))
    def convolve(self, input, **kwargs):
        # Theano spells 'same' cropping as the 'half' border mode.
        border_mode = 'half' if self.crop == 'same' else self.crop
        # The transposed convolution is the gradient of a forward convolution
        # w.r.t. its inputs, so it is expressed through the grad-inputs op.
        op = T.nnet.abstract_conv.AbstractConv3d_gradInputs(
            imshp=self.output_shape,
            kshp=self.get_W_shape(),
            subsample=self.stride, border_mode=border_mode,
            filter_flip=not self.flip_filters)
        output_size = self.output_shape[2:]
        if isinstance(self.output_size, T.Variable):
            output_size = self.output_size
        elif any(s is None for s in output_size):
            # Static shape unknown: fall back to the symbolic input shape.
            output_size = self.get_output_shape_for(input.shape)[2:]
        conved = op(self.W, input, output_size)
        return conved
# "Deconvolution" alias, matching the alternative name used in the literature
# (see the TransposedConv3DLayer docstring).
Deconv3DLayer = TransposedConv3DLayer
# NOTE(review): the 3D grad-inputs op only exists in newer Theano releases;
# on older installations both names must be removed from the public API.
if not hasattr(T.nnet.abstract_conv,
               'AbstractConv3d_gradInputs'):  # pragma: no cover
    # Hide TransposedConv3DLayer for old Theano versions
    del TransposedConv3DLayer, Deconv3DLayer
    __all__.remove('TransposedConv3DLayer')
    __all__.remove('Deconv3DLayer')
class DilatedConv2DLayer(BaseConvLayer):
    """
    lasagne.layers.DilatedConv2DLayer(incoming, num_filters, filter_size,
    dilation=(1, 1), pad=0, untie_biases=False,
    W=lasagne.init.GlorotUniform(), b=lasagne.init.Constant(0.),
    nonlinearity=lasagne.nonlinearities.rectify, flip_filters=False, **kwargs)

    2D dilated convolution layer

    Performs a 2D convolution with dilated filters, then optionally adds a bias
    and applies an elementwise nonlinearity.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape. The
        output of this layer should be a 4D tensor, with shape
        ``(batch_size, num_input_channels, input_rows, input_columns)``.
    num_filters : int
        The number of learnable convolutional filters this layer has.
    filter_size : int or iterable of int
        An integer or a 2-element tuple specifying the size of the filters.
    dilation : int or iterable of int
        An integer or a 2-element tuple specifying the dilation factor of the
        filters. A factor of :math:`x` corresponds to :math:`x - 1` zeros
        inserted between adjacent filter elements.
    pad : int, iterable of int, or 'valid' (default: 0)
        The amount of implicit zero padding of the input.
        This implementation does not support padding, the argument is provided
        for compatibility to other convolutional layers only.
    untie_biases : bool (default: False)
        If ``False``, the layer will have a bias parameter for each channel,
        which is shared across all positions in this channel. As a result, the
        `b` attribute will be a vector (1D).
        If True, the layer will have separate bias parameters for each
        position in each channel. As a result, the `b` attribute will be a
        3D tensor.
    W : Theano shared variable, expression, numpy array or callable
        Initial value, expression or initializer for the weights.
        These should be a 4D tensor with shape
        ``(num_input_channels, num_filters, filter_rows, filter_columns)``.
        Note that the first two dimensions are swapped compared to a
        non-dilated convolution.
        See :func:`lasagne.utils.create_param` for more information.
    b : Theano shared variable, expression, numpy array, callable or ``None``
        Initial value, expression or initializer for the biases. If set to
        ``None``, the layer will have no biases. Otherwise, biases should be
        a 1D array with shape ``(num_filters,)`` if `untied_biases` is set to
        ``False``. If it is set to ``True``, its shape should be
        ``(num_filters, output_rows, output_columns)`` instead.
        See :func:`lasagne.utils.create_param` for more information.
    nonlinearity : callable or None
        The nonlinearity that is applied to the layer activations. If None
        is provided, the layer will be linear.
    flip_filters : bool (default: False)
        Whether to flip the filters before sliding them over the input,
        performing a convolution, or not to flip them and perform a
        correlation (this is the default).
        This implementation does not support flipped filters, the argument is
        provided for compatibility to other convolutional layers only.
    **kwargs
        Any additional keyword arguments are passed to the `Layer` superclass.

    Attributes
    ----------
    W : Theano shared variable or expression
        Variable or expression representing the filter weights.
    b : Theano shared variable or expression
        Variable or expression representing the biases.

    Notes
    -----
    The dilated convolution is implemented as the backward pass of a
    convolution wrt. weights, passing the filters as the output gradient.
    It can be thought of as dilating the filters (by adding ``dilation - 1``
    zeros between adjacent filter elements) and cross-correlating them with the
    input. See [1]_ for more background.

    References
    ----------
    .. [1] Fisher Yu, Vladlen Koltun (2016),
           Multi-Scale Context Aggregation by Dilated Convolutions. ICLR 2016.
           http://arxiv.org/abs/1511.07122, https://github.com/fyu/dilation
    """
    def __init__(self, incoming, num_filters, filter_size, dilation=(1, 1),
                 pad=0, untie_biases=False,
                 W=init.GlorotUniform(), b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify, flip_filters=False,
                 **kwargs):
        self.dilation = as_tuple(dilation, 2, int_types)
        # Stride is fixed to 1 for the dilated convolution.
        super(DilatedConv2DLayer, self).__init__(
            incoming, num_filters, filter_size, 1, pad,
            untie_biases, W, b, nonlinearity, flip_filters, n=2, **kwargs)
        # remove self.stride:
        del self.stride
        # require valid convolution
        # (presumably self.pad was normalized to a 2-tuple by the base
        # class, so 'valid'/0 both end up as (0, 0) — TODO confirm)
        if self.pad != (0, 0):
            raise NotImplementedError(
                "DilatedConv2DLayer requires pad=0 / (0,0) / 'valid', but "
                "got %r. For a padded dilated convolution, add a PadLayer."
                % (pad,))
        # require unflipped filters
        if self.flip_filters:
            raise NotImplementedError(
                "DilatedConv2DLayer requires flip_filters=False.")
    def get_W_shape(self):
        num_input_channels = self.input_shape[1]
        # first two sizes are swapped compared to a forward convolution
        return (num_input_channels, self.num_filters) + self.filter_size
    def get_output_shape_for(self, input_shape):
        batchsize = input_shape[0]
        # The effective filter size grows to (filter - 1) * dilation + 1 once
        # the dilation zeros are inserted; stride is 1 and padding is 0.
        return ((batchsize, self.num_filters) +
                tuple(conv_output_length(input, (filter-1) * dilate + 1, 1, 0)
                      for input, filter, dilate
                      in zip(input_shape[2:], self.filter_size,
                             self.dilation)))
    def convolve(self, input, **kwargs):
        # we perform a convolution backward pass wrt weights,
        # passing kernels as output gradient
        imshp = self.input_shape
        kshp = self.output_shape
        # and swapping channels and batchsize
        imshp = (imshp[1], imshp[0]) + imshp[2:]
        kshp = (kshp[1], kshp[0]) + kshp[2:]
        # In the grad-weights op, subsample plays the role of the dilation
        # factor (see the class Notes section).
        op = T.nnet.abstract_conv.AbstractConv2d_gradWeights(
            imshp=imshp, kshp=kshp,
            subsample=self.dilation, border_mode='valid',
            filter_flip=False)
        output_size = self.output_shape[2:]
        if any(s is None for s in output_size):
            # Static shape unknown: fall back to the symbolic input shape.
            output_size = self.get_output_shape_for(input.shape)[2:]
        conved = op(input.transpose(1, 0, 2, 3), self.W, output_size)
        # Swap batch and channel axes back on the way out.
        return conved.transpose(1, 0, 2, 3)
#!/usr/bin/env python
# ===========================================================
# Train a decision tree on the cleaned ClinVar "conflicting" dataset and
# evaluate it over 10 random train/test splits: a ROC sweep across
# probability cutoffs plus an averaged plain accuracy and timing report.
# Usage: python <script> <training_fraction>   (e.g. 0.8)
from sklearn import tree
import pandas as pd
import time
import sys
# Import lib
# ===========================================================
import csv
from datascience import *
import numpy as np
import random
import matplotlib
import matplotlib.pyplot as plt
# %matplotlib inline
plt.style.use('fivethirtyeight')
import collections
import math
from tqdm import tqdm
from time import sleep
from DecisionTreeFunctions import *
# Fraction of rows used for training, taken from the command line.
rate = float(sys.argv[1])
# print(int(rate))
# Initialize useful data
# ===========================================================
# with open('clinvar_conflicting_clean.csv', 'r') as f:
#     reader = csv.reader(f)
#     temp_rows = list(reader)
df = pd.read_csv('clinvar_conflicting_clean.csv', low_memory=False)
# NOTE(review): columns_to_change is never used below — dead code?
columns_to_change = ['ORIGIN', 'EXON', 'INTRON', 'STRAND', 'LoFtool', 'CADD_PHRED', 'CADD_RAW', 'BLOSUM62']
# Replace missing values in the categorical columns with the string "null"
# so the tree can treat "missing" as a category of its own.
df[['CLNVI', 'MC', 'SYMBOL', 'Feature_type', 'Feature', 'BIOTYPE',
    'cDNA_position', 'CDS_position', 'Protein_position', 'Amino_acids', 'Codons',
    'BAM_EDIT', 'SIFT', 'PolyPhen']] = df[['CLNVI', 'MC', 'SYMBOL', 'Feature_type', 'Feature', 'BIOTYPE',
    'cDNA_position', 'CDS_position', 'Protein_position', 'Amino_acids', 'Codons',
    'BAM_EDIT', 'SIFT', 'PolyPhen']].fillna(value="null")
final_acc = 0
start = time.time()
# Repeat over 10 independent shuffles and average the results.
for pp in range(10):
    # Training
    # ===========================================================
    # Shuffle all rows by sampling the full frame without replacement.
    df = df.sample(n = df.shape[0])
    all_rows = df.values.tolist()
    row_num = len(all_rows)
    training_percentage = rate # percent of partition of training dataset
    training_size = int(row_num * training_percentage)
    testing_size = row_num - training_size
    training_attribute = list(df.columns)
    training_data = all_rows[: training_size] # training data should include header row
    testing_data = all_rows[training_size: ] # testing data don't need to include header row
    # NOTE(review): this rebinds the name `tree`, shadowing the
    # `sklearn.tree` module imported above (which is otherwise unused).
    tree = DecisionTree(training_attribute, training_data, "CART")
    # Testing and Computing TN, TP, FN, FP, etc.
    # ===========================================================
    ROC = Table(make_array('CUTOFF', 'TN', 'FN', 'FP', 'TP', 'ACC'))
    step_size = 0.05
    CMap = {0: 'TN', 1: 'FN', 2: 'FP', 3: 'TP'}
    # 00(0) -> TN
    # 01(1) -> FN
    # 10(2) -> FP
    # 11(3) -> TP
    for cutoff in np.arange(0, 1 + step_size, step_size):
        Confusion = {'TN': 0, 'FN': 0, 'FP': 0, 'TP': 0}
        for row in testing_data:
            # prediction is a counter of label 1 and 0
            pred_counter = tree.classify(row, tree.root)
            # Estimated P(label == 1); the epsilon guards against an
            # empty leaf counter (division by zero).
            true_rate = pred_counter.get(1, 0) / (pred_counter.get(1, 0) + pred_counter.get(0, 0) + 0.00000001)
            # print(true_rate)
            true_pred = 1 if true_rate >= cutoff else 0
            # Two-bit code (prediction, truth) -> confusion cell; assumes
            # the true label in row[-1] is an integer 0 or 1 — TODO confirm.
            indicator = (true_pred << 1) + row[-1]
            # accordingly update confusion matrix
            Confusion[CMap[indicator]] += 1
        # concatenate the confusion matrix values into the overall ROC Table
        thisline = [cutoff] + list(Confusion.values()) + [(Confusion['TP'] + Confusion['TN']) / sum(Confusion.values())]
        ROC = ROC.with_row(thisline)
    # Derived per-cutoff metrics; epsilons again guard zero denominators.
    # FMEAS is the harmonic mean of precision and recall (F1).
    ROC = ROC.with_column('SENSITIVITY', ROC.apply(lambda TP, FN: TP / (TP + FN + 0.00000001), 'TP', 'FN'))
    ROC = ROC.with_column('FPR', ROC.apply(lambda TN, FP: FP / (TN + FP + 0.00000001), 'TN', 'FP'))
    ROC = ROC.with_column('FMEAS', ROC.apply(lambda TP, FP, FN: 2 * (TP / (TP + FN + 0.00000001)) * (TP / (TP + FP + 0.00000001)) / (TP / (TP + FN + 0.00000001) + TP / (TP + FP + 0.00000001) + 0.00000001), 'TP', 'FP', 'FN'))
    # Original Testing
    # ===========================================================
    # Plain accuracy: a pure leaf scores 1 or 0; a mixed leaf contributes
    # the fraction of its samples that share the row's true label.
    accuracy = []
    for row in testing_data:
        classification = tree.classify(row, tree.root)
        if len(classification) == 1:
            accuracy.append(int(classification.get(row[-1], 0) > 0))
        else:
            tot = sum(classification.values())
            accuracy.append(classification.get(row[-1], 0) / tot)
    final_acc += sum(accuracy) / len(accuracy)
end = time.time()
# Report the per-iteration average time and accuracy over the 10 runs.
print("Decision Tree Trained! Time: %.03fs" % ((end - start) / 10))
final_acc /= 10
print(final_acc, ((end - start) / 10))
class MuzzleTestCase:
    """Placeholder test-case class; defines no behavior of its own."""
    pass
from selenium import webdriver
import time

# Read the target URLs, one per line. Index 0 is a deliberate placeholder so
# the submission loop below can start at index 1.
# (Fixed: the previous version also opened the file a second time via a bare
# open() whose handle was never used or closed — a resource leak.)
url_list = [""]
with open("url_list.txt") as file:
    # NOTE(review): this stops at the first blank line; if url_list.txt may
    # contain blank separator lines, iterate all lines and skip empties
    # instead — confirm the file format before changing.
    while (line := file.readline().rstrip()):
        url_list.append(line)
print("here it is")
print(url_list)
for i in range (1, len(url_list)):
driver = webdriver.Chrome()
driver.get(url_list[i])
firstName = driver.find_element_by_xpath('//*[@id="first_name"]')
firstName.send_keys('David')
lastName = driver.find_element_by_xpath('//*[@id="last_name"]')
lastName.send_keys('Solinsky')
email = driver.find_element_by_xpath('//*[@id="email"]')
email.send_keys('dsolinsky98@gmail.com')
phone = driver.find_element_by_xpath('//*[@id="phone"]')
phone.send_keys('5715247794')
#Select location
location = driver.find_element_by_xpath('//*[@id="job_application_location"]')
location.send_keys('Herndon, Virginia, United States')
time.sleep(1)
locationConfirm = driver.find_element_by_xpath('//*[@id="ui-id-1"]/li[1]')
locationConfirm.click()
#Upload Resume
#time.sleep(1)
#upload = driver.find_element_by_xpath('//*[@id="main_fields"]/div[8]/div/div[3]/a[1]').send_keys("W:\Job Application-Resume\David_Solinsky_Resume.docx")
#time.sleep(1)
#Select school
school = driver.find_element_by_xpath('//*[@id="s2id_education_school_name_0"]/a').click()
schoolTextBox = driver.find_element_by_xpath('//*[@id="s2id_autogen6_search"]').send_keys('College of William and Mary')
time.sleep(1)
schoolTextBox = driver.find_element_by_xpath('//*[@id="s2id_autogen6_search"]').send_keys(u'\ue007')
#Select degree
degree = driver.find_element_by_xpath('//*[@id="s2id_education_degree_0"]/a').click()
time.sleep(1)
degreeBox = driver.find_element_by_xpath('//*[@id="select2-result-label-12"]').click()
#Select discipline
discipline = driver.find_element_by_xpath('//*[@id="s2id_education_discipline_0"]/a').click()
disciplineTextBox = driver.find_element_by_xpath('//*[@id="s2id_autogen8_search"]').send_keys('Computer Science')
time.sleep(1)
discipline = driver.find_element_by_xpath('//*[@id="s2id_autogen8_search"]').send_keys(u'\ue007')
#textboxes
startMonth = driver.find_element_by_xpath('//*[@id="education_section"]/div/fieldset/div[4]/fieldset/input[1]')
startMonth.send_keys('08')
startYear = driver.find_element_by_xpath('//*[@id="education_section"]/div/fieldset/div[4]/fieldset/input[2]')
startYear.send_keys('2017')
endMonth = driver.find_element_by_xpath('//*[@id="education_section"]/div/fieldset/div[5]/fieldset/input[1]')
endMonth.send_keys('01')
endYear = driver.find_element_by_xpath('//*[@id="education_section"]/div/fieldset/div[5]/fieldset/input[2]')
endYear.send_keys('2022')
linkedIn = driver.find_element_by_xpath('//*[@id="job_application_answers_attributes_2_text_value"]')
linkedIn.send_keys('https://www.linkedin.com/in/david-solinsky/')
github = driver.find_element_by_xpath('//*[@id="job_application_answers_attributes_3_text_value"]')
github.send_keys('https://github.com/dsol-cpu')
q1 = driver.find_element_by_xpath('//*[@id="s2id_job_application_answers_attributes_4_answer_selected_options_attributes_4_question_option_id"]/a').click()
time.sleep(1)
q1 = driver.find_element_by_xpath('//*[@id="select2-result-label-153"]').click()
q2 = driver.find_element_by_xpath('//*[@id="s2id_job_application_answers_attributes_5_answer_selected_options_attributes_5_question_option_id"]/a').click()
time.sleep(1)
q2Box = driver.find_element_by_xpath('//*[@id="select2-result-label-156"]').click()
q3 = driver.find_element_by_xpath('//*[@id="s2id_job_application_answers_attributes_6_answer_selected_options_attributes_6_question_option_id"]/a').click()
time.sleep(1)
q3Box = driver.find_element_by_xpath('//*[@id="select2-result-label-168"]').click()
q4 = driver.find_element_by_xpath('//*[@id="s2id_job_application_answers_attributes_7_answer_selected_options_attributes_7_question_option_id"]/a').click()
time.sleep(1)
q4Box = driver.find_element_by_xpath('//*[@id="select2-result-label-170"]').click()
q5 = driver.find_element_by_xpath('//*[@id="s2id_job_application_answers_attributes_8_answer_selected_options_attributes_8_question_option_id"]/a').click()
time.sleep(1)
q5Box = driver.find_element_by_xpath('//*[@id="select2-result-label-172"]').click()
checkBox1 = driver.find_element_by_xpath('//*[@id="demographic_questions"]/div[1]/label[1]/input').click()
checkBox2 = driver.find_element_by_xpath('//*[@id="demographic_questions"]/div[2]/label[2]/input').click()
checkBox3 = driver.find_element_by_xpath('//*[@id="demographic_questions"]/div[3]/label[6]/input').click()
checkBox4 = driver.find_element_by_xpath('//*[@id="demographic_questions"]/div[4]/label[2]/input').click() |
import firebase_admin
from firebase_admin import credentials, db
import datetime
class FirebaseStream:
    """Wraps a Firebase Realtime Database listener that logs change events."""
    # NOTE(review): this assigns the type ``str`` itself to each class
    # attribute (not empty strings) — looks like it was meant to be
    # ``key: str`` style annotations; confirm intent.
    key, path, type, pathD, data, child = str, str, str, str, str, str
    @staticmethod
    def initialize_app(path: str, database_url: str):
        """Initialize the firebase_admin SDK from a service-account key file."""
        cred = credentials.Certificate(path)
        firebase_admin.initialize_app(cred, {
            'databaseURL': database_url
        })
    def __init__(self, path: str, debug: bool = True) -> None:
        """Start listening on *path*; listener() fires on every change."""
        self.path = path
        self.debug = debug
        ref = db.reference(path)
        self.request_count = 0  # number of events received so far
        self.stream = ref.listen(self.listener)
    def listener(self, event):
        """Handle one database event and store its details on the instance.

        Skips the initial snapshot (request_count == 1) and the stream's own
        "last_updated" writes so the debug stamp below cannot loop.
        NOTE(review): ``"last_updated" not in event.data`` assumes event.data
        is a container; for some events it may be None or a scalar — confirm.
        """
        self.request_count += 1
        if self.request_count != 1 and "last_updated" not in event.data: # "Updated" when starts
            print("REQUEST INDEX: {}".format(self.request_count))
            now = str(datetime.datetime.now())
            self.type = (event.event_type.upper()) # can be 'put' or 'patch'
            self.pathD = event.path # relative to the reference, it seems
            self.data = event.data # new data at /reference/event.path. None if deleted
            self.child = db.reference(self.path + event.path.rsplit('/', 1)[0]).get()
            self.key = (event.path.split('/', 2)[1])
            if self.debug:
                # Stamp the parent node so repeated runs can be traced.
                db.reference(self.path + event.path.rsplit('/', 1)[0]).update({
                    "last_updated": now
                })
            print({
                "type": self.type,
                "path": self.pathD,
                "data": self.data,
                "child": self.child,
                "key": self.key
            })
    def get_path(self):
        # Path of the most recent event, relative to the listened reference.
        return self.pathD
    def get_type(self):
        # Event type of the most recent event ('PUT' or 'PATCH').
        return self.type
    def get_key(self):
        # First path segment of the most recent event.
        return self.key
    def get_data(self):
        # Payload of the most recent event (None if the node was deleted).
        return self.data
# Module-level bootstrap: connect to the project database and start
# streaming changes under "blogs/" (runs immediately on import).
FirebaseStream.initialize_app("serviceAccountKey.json", "https://adminhbeg.firebaseio.com/")
stream = FirebaseStream("blogs/")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author : 陈坤泽
# @Email : 877362867@qq.com
# @Date : 2021/06/03 14:22
import textwrap
from collections import defaultdict, Counter
import math
import re
import sys
from pyxllib.prog.newbie import typename
from pyxllib.text.pupil import listalign, int2myalphaenum
def natural_sort_key(key):
    """Return a mixed int/str list that orders strings "naturally".

    Digit runs become ints, everything else is lower-cased, so numeric
    segments compare by value rather than lexicographically.

    >>> natural_sort_key('0.0.43') < natural_sort_key('0.0.43.1')
    True
    >>> natural_sort_key('0.0.2') < natural_sort_key('0.0.12')
    True
    """
    parts = []
    for chunk in re.split('([0-9]+)', str(key)):
        parts.append(int(chunk) if chunk.isdigit() else chunk.lower())
    return parts
def natural_sort(ls, only_use_digits=False):
    """ Sort a sequence in natural (human) order.

    :param only_use_digits: normally digit runs are split out and each part
        is compared; set True to compare only the numeric parts and ignore
        the surrounding text entirely

    >>> natural_sort(['0.1.12', '0.0.10', '0.0.23'])
    ['0.0.10', '0.0.23', '0.1.12']
    """
    if only_use_digits:
        def sort_key(item):
            return [int(seg) for seg in re.split('([0-9]+)', str(item)) if seg.isdigit()]
    else:
        sort_key = natural_sort_key
    return sorted(ls, key=sort_key)
def argsort(seq):
    """Return the indices that would sort *seq* ascending.

    See http://stackoverflow.com/questions/3071415 for the rank-vector idiom.
    """
    return sorted(range(len(seq)), key=lambda idx: seq[idx])
def make_index_function(li, *, start=0, nan=None):
    """ Build a function mapping a value to its index in *li*.

    :param li: reference list of values
    :param start: index assigned to the first element
    :param nan: value returned when an element is not found; defaults to
        len(li) (deliberately not -1), so that when the function is used as
        a sort key, unknown items are grouped at the end

    >>> func = make_index_function(['少儿', '小学', '初中', '高中'])
    >>> sorted(['初中', '小学', '高中'], key=func)
    ['小学', '初中', '高中']

    Items outside the enumeration all sort last:
    >>> sorted(['初中', '小学', '高中', '幼儿'], key=func)
    ['小学', '初中', '高中', '幼儿']
    """
    data = {x: i for i, x in enumerate(li, start=start)}
    if nan is None:
        nan = len(li)

    def wrapper(x, default=None):
        # FIX: renamed from the original misspelling "warpper".
        if default is None:
            default = nan
        return data.get(x, default)

    return wrapper
class ValuesStat:
    """ Basic statistics (count/sum/mean/std/min/max) over a numeric sequence. """

    def __init__(self, values):
        self.values = values
        self.n = len(values)
        self.sum = sum(values)
        # std is hand-rolled on purpose: this is a basic low-level module and
        # should not pull in numpy just for one formula.
        if self.n:
            self.mean = self.sum / self.n
            variance = sum((v - self.mean) ** 2 for v in values) / self.n
            self.std = math.sqrt(variance)
            self.min = min(values)
            self.max = max(values)
        else:
            # Empty input: every statistic degenerates to NaN.
            self.mean = self.std = self.min = self.max = float('nan')

    def __len__(self):
        return self.n

    def summary(self, valfmt='g'):
        """ One-line statistics report.

        :param valfmt: format spec for the numbers; 'g' is a sensible smart
            default, '.3f' keeps three decimals, or pass a list of five specs
            used for [sum, mean, std, min, max] respectively
        """
        if isinstance(valfmt, str):
            valfmt = [valfmt] * 5
        if self.n > 1:
            # Several samples: show the full set of indicators.
            fields = [f'总和: {self.sum:{valfmt[0]}}', f'均值标准差: {self.mean:{valfmt[1]}}±{self.std:{valfmt[2]}}',
                      f'总数: {self.n}', f'最小值: {self.min:{valfmt[3]}}', f'最大值: {self.max:{valfmt[4]}}']
            return '\t'.join(fields)
        elif self.n == 1:
            # Single sample: just print it.
            return f'{self.sum:{valfmt[0]}}'
        else:
            raise ValueError
class Groups:
    def __init__(self, data):
        """ Container of grouped items.

        :param data: a dict is stored as-is; any other iterable is converted
            to a dict keyed 1..n, one group per element
        TODO show some statistics, maybe charts
        TODO text representation for convenient diffing
        """
        if not isinstance(data, dict):
            # Not a dict: auto-number the groups starting from 1.
            data = {num: item for num, item in enumerate(data, start=1)}
        self.data = data  # original data, keyed by group
        self.ctr = Counter({key: len(grp) for key, grp in self.data.items()})  # group sizes
        self.stat = ValuesStat(self.ctr.values())  # aggregate statistics over the sizes

    def __repr__(self):
        lines = [f'{num}, {key}:{val}'
                 for num, (key, val) in enumerate(self.data.items(), start=1)]
        return '\n'.join(lines)

    @classmethod
    def groupby(cls, ls, key, ykey=None):
        """ Build Groups by bucketing *ls*.

        :param ls: any iterable / array-like
        :param key: Callable mapping each element to a hashable bucket key;
            None means *ls* is assumed to be pre-grouped
        :param ykey: optional mapping applied to each stored value
        :return: dict-backed Groups
        """
        buckets = defaultdict(list)
        for item in ls:
            bucket = key(item)
            buckets[bucket].append(ykey(item) if ykey else item)
        return cls(buckets)
def intersection_split(a, b):
    """ Partition two collections (dict/set/list/tuple) by their shared keys.

    Returns ls1, ls2, ls3, ls4 — usually lists, but set inputs stay sets:
        ls1: items of a whose key is shared with b
        ls2: items of a with keys unique to a
        ls3: items of b whose key is shared with a
        ls4: items of b with keys unique to b
    """
    # 1 key sets of both sides
    keys_a = set(a)
    keys_b = set(b)
    shared = keys_a & keys_b  # keys common to both collections
    # TODO preserve insertion order for dict inputs

    # 2 assemble ls1..ls4
    def pick(original, keys, common):
        """original: raw object; keys: its key set; common: the shared keys"""
        if isinstance(original, (set, list, tuple)):
            return common, keys - common
        elif isinstance(original, dict):
            in_common = sorted(map(lambda k: (k, original[k]), common),
                               key=lambda kv: natural_sort_key(kv[0]))
            exclusive = sorted(map(lambda k: (k, original[k]), keys - common),
                               key=lambda kv: natural_sort_key(kv[0]))
            return in_common, exclusive
        else:
            raise ValueError(f'{type(keys)}不是可以用来进行集合规律分析的类型')

    ls1, ls2 = pick(a, keys_a, shared)
    ls3, ls4 = pick(b, keys_b, shared)
    return ls1, ls2, ls3, ls4
def matchpairs(xs, ys, cmp_func, least_score=sys.float_info.epsilon, *,
               key=None, index=False):
    r""" Greedily pair up the items of two sequences by similarity.

    :param xs: first sequence
    :param ys: second sequence
    :param cmp_func: similarity function; larger means more alike
    :param least_score: minimum score a pair needs; must exceed 0 by default
    :param key: optional projection applied to xs/ys before cmp_func
    :param index: return indices instead of the original values
    :return: [(x1, y1, score1), (x2, y2, score2), ...];
        never longer than min(len(xs), len(ys))

    Notes: ① no repeated matching, ② any x or y may stay unmatched.
    If every x must get a match, or repeats are allowed, use MatchPairs.

    TODO the intermediate results are interesting — expose via a class?
    TODO the all-pairs comparison is costly; add an approximate mode?

    >>> xs, ys = [4, 6, 1, 2, 9, 4, 5], [1, 5, 8, 9, 2]
    >>> cmp_func = lambda x,y: 1-abs(x-y)/max(x,y)
    >>> matchpairs(xs, ys, cmp_func)
    [(1, 1, 1.0), (2, 2, 1.0), (9, 9, 1.0), (5, 5, 1.0), (6, 8, 0.75)]
    >>> matchpairs(ys, xs, cmp_func)
    [(1, 1, 1.0), (5, 5, 1.0), (9, 9, 1.0), (2, 2, 1.0), (8, 6, 0.75)]
    >>> matchpairs(xs, ys, cmp_func, 0.9)
    [(1, 1, 1.0), (2, 2, 1.0), (9, 9, 1.0), (5, 5, 1.0)]
    >>> matchpairs(xs, ys, cmp_func, 0.9, index=True)
    [(2, 0, 1.0), (3, 4, 1.0), (4, 3, 1.0), (6, 1, 1.0)]
    """
    # 0 optionally project both sides before comparing
    mapped_xs = [key(x) for x in xs] if key else xs
    mapped_ys = [key(y) for y in ys] if key else ys
    # 1 score every (i, j) pair that clears the threshold
    candidates = []
    for i, xv in enumerate(mapped_xs):
        for j, yv in enumerate(mapped_ys):
            score = cmp_func(xv, yv)
            if score >= least_score:
                candidates.append((i, j, score))
    # Best score first; ties broken by first-come index order.
    candidates.sort(key=lambda c: (-c[2], c[0], c[1]))
    # 2 greedy selection: each index may be used at most once
    chosen = []
    used_xs, used_ys = set(), set()
    for i, j, score in candidates:
        if i in used_xs or j in used_ys:
            continue
        used_xs.add(i)
        used_ys.add(j)
        chosen.append((i, j, score) if index else (xs[i], ys[j], score))
    return chosen
def get_number_width(n):
    """ Number of decimal digits of the positive integer n.

    The AssertionError on non-positive input is part of the documented
    contract (see the doctest), so the assert is kept as-is.

    >>> get_number_width(0)
    Traceback (most recent call last):
    AssertionError
    >>> get_number_width(9)
    1
    >>> get_number_width(10)
    2
    >>> get_number_width(97)
    2
    """
    assert n > 0
    width = math.ceil(math.log10(n + 1))
    return width
class SearchBase:
    """ Template class for DFS / BFS traversal """
    def __init__(self, root):
        """
        Args:
            root: root node of the traversal
        """
        self.root = root
    def get_neighbors(self, node):
        """ Yield adjacent nodes; a generator so dfs and bfs can share it.

        For a tree this means the direct children.  The default follows the
        bs4 Tag convention (``node.children``); override for other business
        needs — e.g. for graphs/boards keep visit state on self and yield
        the surrounding cells here.
        """
        try:
            for child in node.children:
                yield child
        except AttributeError:
            # No .children attribute: treat the node as a leaf.
            pass
    def dfs_nodes(self, node=None, depth=0):
        """ Nodes in depth-first order.

        :param node: start node, defaults to the root
        :param depth: depth of *node*
        :return: list, [(node1, depth1), (node2, depth2), ...]
        """
        if not node:
            node = self.root
        found = [(node, depth)]
        for child in self.get_neighbors(node):
            found += self.dfs_nodes(child, depth + 1)
        return found
    def bfs_nodes(self, node=None, depth=0):
        """ Nodes in breadth-first order; same return shape as dfs_nodes. """
        if not node:
            node = self.root
        found = [(node, depth)]
        i = 0
        while i < len(found):
            current, d = found[i]
            found += [(child, d + 1) for child in self.get_neighbors(current)]
            i += 1
        return found
    def fmt_node(self, node, depth, *, prefix='    ', show_node_type=False):
        """ Format a single node for display (indent + optional type + text). """
        indent = prefix * depth
        tag = typename(node) + ',' if show_node_type else ''
        text = textwrap.shorten(str(node), 200)
        return indent + tag + text
    def fmt_nodes(self, *, nodes=None, select_depth=None, linenum=False,
                  msghead=True, show_node_type=False, prefix='    '):
        """ Formatted listing of the node tree.

        :param nodes: nodes to show; defaults to dfs_nodes()
        :param prefix: indent per level, default four spaces
        :param select_depth: depth(s) to display
            single int: only that level
            sequence of two ints: the closed interval of levels
        :param linenum: number the nodes from 1; the number is followed by an
            Excel-column-like letter encoding the node's depth
        :param msghead: prepend a summary line with totals
        :param show_node_type: include each node's type name
        Requires
            textwrap: shorten
            align.listalign: aligning the generated line numbers
        """
        # 1 collect the nodes
        ls = nodes if nodes else self.dfs_nodes()
        total_node = len(ls)
        total_depth = max(map(lambda x: x[1], ls))
        head = f'总节点数:1~{total_node},总深度:0~{total_depth}'
        # 2 filter and re-base ls according to select_depth
        logo = True
        cnt = 0
        tree_num = 0
        if isinstance(select_depth, int):
            for i in range(total_node):
                if ls[i][1] == select_depth:
                    # BUGFIX: entries are (node, depth) *tuples*, so the old
                    # in-place write `ls[i][1] = 0` raised TypeError; rebuild
                    # the pair instead.
                    ls[i] = (ls[i][0], 0)
                    cnt += 1
                    logo = True
                elif ls[i][1] < select_depth and logo:  # first ancestor seen: keep one blank line
                    ls[i] = ''
                    tree_num += 1
                    logo = False
                else:  # drop this node from the output entirely
                    ls[i] = None
            head += f';挑选出的节点数:{cnt},所选深度:{select_depth},树数量:{tree_num}'
        elif hasattr(select_depth, '__getitem__'):
            for i in range(total_node):
                if select_depth[0] <= ls[i][1] <= select_depth[1]:
                    # BUGFIX: same tuple-immutability issue as above for
                    # `ls[i][1] -= select_depth[0]`.
                    ls[i] = (ls[i][0], ls[i][1] - select_depth[0])
                    cnt += 1
                    logo = True
                elif ls[i][1] < select_depth[0] and logo:  # first ancestor seen: keep one blank line
                    ls[i] = ''
                    tree_num += 1
                    logo = False
                else:  # drop this node from the output entirely
                    ls[i] = None
            head += f';挑选出的节点数:{cnt},所选深度:{select_depth[0]}~{select_depth[1]},树数量:{tree_num}'
        """At this point each ls[i] is one of three things:
        (node, depth): tuple — node object plus its (re-based) depth
        None: removed element, kept as a placeholder so numbering stays aligned
        '': removed parent node, rendered as a blank separator line
        """
        # 3 render
        def mystr(item):
            return self.fmt_node(item[0], item[1], prefix=prefix, show_node_type=show_node_type)
        line_num = listalign(range(1, total_node + 1))
        res = []
        for i in range(total_node):
            if ls[i] is not None:
                if isinstance(ls[i], str):  # line content already decided
                    res.append(ls[i])
                else:
                    if linenum:  # int2myalphaenum encodes the depth as a letter
                        res.append(line_num[i] + int2myalphaenum(ls[i][1]) + ' ' + mystr(ls[i]))
                    else:
                        res.append(mystr(ls[i]))
        s = '\n'.join(res)
        # optionally prepend the summary header
        if msghead:
            s = head + '\n' + s
        return s
|
"""
Find more at: https://github.com/l-farrell/isam-ob/settings
"""
import logging
from pyisam.util.model import DataObject
from pyisam.util.restclient import RESTClient
# REST path of the risk-profiles resource (v8 API), used by RiskProfiles below.
RISK_PROFILES = "/iam/access/v8/risk/profiles"
logger = logging.getLogger(__name__)
class RiskProfiles(object):
    """Thin REST client for the risk-profiles endpoint."""

    def __init__(self, base_url, username, password):
        super(RiskProfiles, self).__init__()
        # All calls go through one authenticated REST client.
        self.client = RESTClient(base_url, username, password)

    def create(self, description=None, name=None, active=None, attributes=None):
        """POST a new (non-predefined) risk profile.

        Returns the raw response with ``success`` set when the server
        answered 201 Created.
        """
        payload = DataObject()
        payload.add_value_string("description", description)
        payload.add_value_string("name", name)
        payload.add_value("active", active)
        payload.add_value("attributes", attributes)
        payload.add_value("predefined", False)
        response = self.client.post_json(RISK_PROFILES, payload.data)
        response.success = response.status_code == 201
        return response
|
"""
cache implementations
Based on python-llfuse/examples/passthroughfs.py
written by Nicolaus Rath
Copyright © Nikolaus Rath <Nikolaus.org>
"""
import logging
from llfuse import ROOT_INODE, FUSEError
from threading import Lock
from collections import defaultdict
import errno
from Libfs.misc import calltrace_logger
logger = logging.getLogger(__name__)
class Memcache:
    """In-memory bookkeeping of inode <-> virtual path <-> fd relations."""

    @calltrace_logger
    def __init__(self):
        self.fd2inode_map = dict()
        self.inode2fd_map = dict()
        self.inode2vpath_map = { ROOT_INODE: '/'}
        self.lookup_cnt = defaultdict(lambda : 0)  # kernel lookup count per inode
        self.fd_open_count = dict()
        self.lookup_lock = Lock()  # guards lookup_cnt / inode2vpath_map removal

    @calltrace_logger
    def get_path_by_inode(self, inode):
        """Return the virtual path of *inode*; raise FUSEError(ENOENT) if unknown."""
        # Lazy %-args: the map is only stringified when debug logging is on.
        logger.debug("get_path_by_inode: %s", self.inode2vpath_map)
        try:
            val = self.inode2vpath_map[inode]
        except KeyError:
            raise FUSEError(errno.ENOENT)
        if isinstance(val, set):
            # In case of hardlinks, pick any path
            val = next(iter(val))
        return val

    @calltrace_logger
    def get_fd_by_inode(self, inode):
        """Return the open fd of *inode*; raise FUSEError(ENOENT) if none."""
        # BUGFIX: log label previously said "get_path_by_inode" (copy-paste).
        logger.debug("get_fd_by_inode: %s", self.inode2fd_map)
        try:
            val = self.inode2fd_map[inode]
        except KeyError:
            raise FUSEError(errno.ENOENT)
        return val

    @calltrace_logger
    def add_inode_path_pair(self, inode, path):
        """Record a lookup of (inode, path); keeps inode2vpath_map consistent."""
        self.lookup_cnt[inode] += 1
        # Linear scan: if the path is already cached (possibly under another
        # inode), leave the existing mapping untouched.
        for _inode, _path in self.inode2vpath_map.items():
            if _path == path :
                logger.debug("path %s already in cache with inode %s, got inode '%s'", path, _inode, inode )
                return
        if inode not in self.inode2vpath_map:
            self.inode2vpath_map[inode] = path
        else :
            logger.debug("checking if '%s' == '%s'", path, self.inode2vpath_map[inode] )
            assert (path == self.inode2vpath_map[inode] )
        return

    @calltrace_logger
    def update_inode_path_pair(self, inode, path):
        """Replace the path stored for an already-known inode."""
        logger.debug(inode in self.inode2vpath_map)
        assert((inode in self.inode2vpath_map))
        self.inode2vpath_map[inode] = path

    @calltrace_logger
    def forget(self, inode_list):
        """Drop lookup references; fully forget inodes whose count hits zero.

        :param inode_list: iterable of (inode, nlookup) pairs
        """
        for (inode, nlookup) in inode_list:
            if self.lookup_cnt[inode] > nlookup:
                self.lookup_cnt[inode] -= nlookup
                continue
            logger.debug('forgetting about inode %d', inode)
            # XXX We never put sth into inode2fd_map...
            assert inode not in self.inode2fd_map
            # BUGFIX: lock held via context manager — the old manual
            # acquire()/release() pair left the lock held forever if a
            # deletion raised.
            with self.lookup_lock:
                # XXX this could fail if inode is not looked up? Could put it in a proper try except:
                del self.lookup_cnt[inode]
                del self.inode2vpath_map[inode]

    @calltrace_logger
    def forget_path(self, inode, path):
        """
        called by rmdir
        """
        logger.debug('forget %s for %d', path, inode)
        val = self.inode2vpath_map[inode]
        if isinstance(val, set):
            # Hardlinked inode: drop just this path; collapse the set when
            # only one path remains.
            val.remove(path)
            if len(val) == 1:
                self.inode2vpath_map[inode] = next(iter(val))
        else:
            # BUGFIX: same exception-safe locking as in forget().
            with self.lookup_lock:
                # XXX this could fail if inode is not looked up? Could put it in a proper try except:
                del self.lookup_cnt[inode]
                del self.inode2vpath_map[inode]

    @calltrace_logger
    def update_maps(self, old_path , new_path):
        """
        update all internal maps in case of a rename of a directory
        """
        # get proper difference between old and new path
        # NOTE(review): assumes every map value is a str here; hardlink
        # entries stored as sets would break .replace() — confirm.
        logger.debug("update_maps: %s", self.inode2vpath_map)
        for inode in self.inode2vpath_map:
            logger.debug("inode %s: replace %s by %s for %s", inode, old_path, new_path, self.inode2vpath_map[inode] )
            self.inode2vpath_map[inode] = self.inode2vpath_map[inode].replace(old_path, new_path)
        logger.debug("update_maps: %s", self.inode2vpath_map)
        return
|
"""
Afterglow Core: OAuth2 server routes
"""
from flask import redirect, request, url_for, render_template
from .. import app
from ..errors import MissingFieldError
from ..errors.oauth2 import UnknownClientError
from ..auth import auth_required
from ..oauth2 import oauth_clients, oauth_server
from ..resources.users import DbUserClient
@app.route('/oauth2/authorize', methods=['GET'])
@auth_required(allow_redirect=True)
def oauth2_authorize():
    """OAuth2 authorization endpoint.

    Requires an authenticated user. If the user has not yet approved the
    requesting client, redirects to the consent page first; otherwise
    issues the authorization response for the given client_id.
    """
    client_id = request.args.get('client_id')
    if not client_id:
        raise MissingFieldError('client_id')
    # Check that the user allowed the client
    if not DbUserClient.query.filter_by(
            user_id=request.user.id, client_id=client_id).count():
        # Redirect users to consent page if the client was not confirmed yet
        return redirect(url_for(
            'oauth2_consent', client_id=client_id, next=request.url))
    return oauth_server.create_authorization_response(grant_user=request.user)
@app.route('/oauth2/token', methods=['POST'])
def oauth2_token():
    """OAuth2 token endpoint: exchange a grant for an access token."""
    return oauth_server.create_token_response()
@app.route('/oauth2/consent', methods=['GET'])
@auth_required(allow_redirect=True)
def oauth2_consent():
    """Render the consent page asking the user to approve an OAuth2 client.

    NOTE(review): the route only registers GET, so the trailing
    ``request.method == 'GET'`` check is always true; were it ever false
    the view would implicitly return None — confirm intent.
    """
    client_id = request.args.get('client_id')
    if not client_id:
        raise MissingFieldError('client_id')
    if client_id not in oauth_clients:
        raise UnknownClientError(id=client_id)
    client = oauth_clients[client_id]
    if request.method == 'GET':
        return render_template(
            'oauth2/consent.html.j2', oauth_client=client,
            next_url=request.args.get('next'))
|
import unittest
import numpy as np
import ed_geometry as geom
import ed_symmetry as symm
import ed_fermions
import fermi_gas as fg
class TestFermions(unittest.TestCase):
def setUp(self):
pass
# single species fermi gas
def test_fg_energies(self):
"""
Test energies of single species of non-interacting fermions. Compare with Fermi sea calculation.
"""
u = 0
t = 1
nsites = 10
temps = np.logspace(-1, 1, 30)
# fermi sea calculation
kxs = 2 * np.pi * np.arange(0, nsites) / nsites
dispersion = - 2 * t * np.cos(kxs)
energy_exps = np.zeros(len(temps))
for ii, temp in enumerate(temps):
# two for two spin states
energy_exps[ii] = np.sum(np.divide(dispersion, np.exp(dispersion / temp) + 1))
# fermions calculation
cluster = geom.Geometry.createSquareGeometry(nsites, 1, 0, 0, bc1_open=False, bc2_open=True)
spinless_fermions = ed_fermions.fermions(cluster, u, t, nspecies=1)
hamiltonian = spinless_fermions.createH()
eig_vals, eig_vects = spinless_fermions.diagH(hamiltonian)
energy_exps_model = spinless_fermions.get_exp_vals_thermal(eig_vects, hamiltonian, eig_vals, temps, 0)
max_diff = np.abs(energy_exps - energy_exps_model).max()
self.assertAlmostEqual(max_diff, 0, 12)
def test_fg_density(self):
"""
compare single component fermi gas density calculated using ED versus expected result
:return:
"""
# hopping
t = 1
# temperatures
temps = np.logspace(-1, 1, 30) * t
temps = np.concatenate((np.array([0.]), temps))
betas = np.divide(1, temps)
betas[temps == 0] = np.inf
# mus
mus = np.linspace(-4, 4, 30)
# geometry
gm = geom.Geometry.createSquareGeometry(8, 1, 0, 0, bc1_open=False, bc2_open=True)
ed_dens = np.zeros((mus.size, temps.size))
fg_dens = np.zeros((mus.size, temps.size))
for ii, mu in enumerate(mus):
# ED
sf = ed_fermions.fermions(gm, 0, t, mus=mu, us_same_species=0, potentials=0, nspecies=1)
ham = sf.createH(print_results=False)
eig_vals, eig_vects = sf.diagH(ham, print_results=False)
ed_dens[ii, :], _ = sf.get_thermal_exp_sites(eig_vects, eig_vals, sf.n_op, 0, temps, sites=[0], format="boson")
# non-interacting fg calc
fg_dens[ii, :] = fg.fg_density(betas, mu, nsites=gm.nsites, dim='1d')
max_diff = np.abs(fg_dens - ed_dens).max()
self.assertAlmostEqual(max_diff, 0, 12)
def test_fg_correlations(self):
"""
compare single component fermi gas correlations calculated using ED versus expected result
:return:
"""
# hopping
t = 1
# temperatures
temps = np.logspace(-1, 1, 30) * t
temps = np.concatenate((np.array([0.]), temps))
# temps = np.array([0.])
betas = np.divide(1, temps)
betas[temps == 0] = np.inf
# mus
mus = np.linspace(-4, 4, 30)
# geometry
gm = geom.Geometry.createSquareGeometry(8, 1, 0, 0, bc1_open=False, bc2_open=True)
# solve at each mu
ed_corr = np.zeros((mus.size, temps.size))
fg_corr = np.zeros((mus.size, temps.size))
for ii, mu in enumerate(mus):
# ED
sf = ed_fermions.fermions(gm, 0, t, mus=mu, us_same_species=0, potentials=0, nspecies=1)
ham = sf.createH(print_results=False)
eig_vals, eig_vects = sf.diagH(ham, print_results=False)
exps, _ = sf.get_thermal_exp_sites(eig_vects, eig_vals, sf.n_op, 0, temps, projector=sf.n_projector, sites = [0, 1], format="boson")
corrs, _, _ = sf.get_thermal_corr_sites(eig_vects, eig_vals, 0, 0, sf.n_op, sf.n_op, temps,
sites1=np.array([0]),
sites2=np.array([1]), projector=sf.n_projector,
format="boson")
ed_corr[ii, :] = corrs - exps[0, :] * exps[1, :]
# non-interacting fg calc
fg_corr[ii, :] = fg.fg_corr(betas, mu, nsites=gm.nsites, dim='1d')
max_diff = np.abs(fg_corr - ed_corr).max()
self.assertAlmostEqual(max_diff, 0, 12)
@unittest.skip("not finished.")
def test_heisenberg(self):
""""
Spinless fermion model
\sum_<i,j> t (c^\dag_i c_j + h.c.) + U \sum_i (n_i - 0.5) * (n_{i+1} - 0.5)
maps to heisenberg model
\sum_<i,j> J * (S^x_i \cdot S^x_j + S^y_i \cdot S^y_j) + J_z * S^z_i \cdot S^z_j
J = 2 * t
J_z = U
"""
# TODO: finish this ... need to get comparison working ...
t = -0.5
U = 1.
mu = U
gm = geom.Geometry.createSquareGeometry(3, 3, 0, 0, bc1_open=False, bc2_open=False)
sf = ed_fermions.fermions(gm, 0, t, mus=mu, us_same_species=U, potentials=0, nspecies=1)
ham = sf.createH()
eig_vals, eig_vects = sf.diagH(ham, print_results=False)
offset = 0.25 * U * gm.nsites
eig_vals = eig_vals + offset
# hubbard
def test_hubbard_atomic_limit(self):
"""
Test Hubbard model with zero tunneling. Compare with atomic limit calculation
:return:
"""
u = 8
t = 0
temps = np.logspace(-1, 1, 30)
# atomic limit calculation for one site
z = 3 + np.exp(-np.divide(1.0, temps) * u)
energy_exps = u * np.divide(np.exp(-np.divide(1.0, temps) * u), z)
# ed calculation
cluster = geom.Geometry.createSquareGeometry(1, 1, 0, 0, bc1_open=True, bc2_open=True)
hubbard_model = ed_fermions.fermions(cluster, u, t, ns=np.array([1, 1]))
hamiltonian = hubbard_model.createH()
eig_vals, eig_vects = hubbard_model.diagH(hamiltonian)
energy_exps_model = np.zeros(len(temps))
for ii, temp in enumerate(temps):
energy_exps_model[ii] = hubbard_model.get_exp_vals_thermal(eig_vects, hamiltonian, eig_vals, temp, 0)
max_diff = np.abs(energy_exps - energy_exps_model).max()
self.assertAlmostEqual(max_diff, 0, 12)
def test_hubbard_non_interacting(self):
"""
Test Hubbard system with U=0. Compare with explicit Fermi sea calculation.
:return:
"""
u = 0
t = 1
nsites = 5
temps = np.logspace(-1, 1, 30)
# fermi sea calculation
kxs = 2 * np.pi * np.arange(0, nsites) / nsites
dispersion = - 2 * t * np.cos(kxs)
energy_exps = np.zeros(len(temps))
for ii, temp in enumerate(temps):
# two for two spin states
energy_exps[ii] = 2 * np.sum(np.divide(dispersion, np.exp(dispersion / temp) + 1))
# ed calculation
cluster = geom.Geometry.createSquareGeometry(nsites, 1, 0, 0, bc1_open=False, bc2_open=True)
hubbard_model = ed_fermions.fermions(cluster, u, t)
hamiltonian = hubbard_model.createH()
eig_vals, eig_vects = hubbard_model.diagH(hamiltonian)
energy_exps_model = hubbard_model.get_exp_vals_thermal(eig_vects, hamiltonian, eig_vals, temps, 0)
max_diff = np.abs(energy_exps - energy_exps_model).max()
self.assertAlmostEqual(max_diff, 0, 12)
def test_two_sites(self):
"""
Test two-site Hubbard system with open boundary conditions and no restriction on particle number.
:return:
"""
U = 20 * (np.random.rand() - 0.5)
t = np.random.rand()
gm = geom.Geometry.createSquareGeometry(2, 1, 0, 0, bc1_open=True, bc2_open=True)
model = ed_fermions.fermions(gm, U, t, ns=None)
hamiltonian = model.createH()
eig_vals, eig_vects = model.diagH(hamiltonian)
# vacuum, E = 0
# one up, E = -t, +t
# one down, E = -t, +t
# two ups, E = 0
# two downs, E = 0
# two ups and one down, E = U - t, U + t
# two downs and one up, E = U - t, U + t
# two ups and two downs, E = 2*U
# one up and one down subspace:
# symm combination of doublon site 0 and site 1, E = 8
# other three states, E =
# TODO: analytic expression for this remaining subspace
val0 = 0
val1 = 0.5 * (U + np.sqrt(U ** 2 + 16 * t ** 2))
val2 = 0.5 * (U - np.sqrt(U ** 2 + 16 * t ** 2))
expected_eig_vals = np.array([-t, -t, val0, val1, val2, 0., 0., 0., t, t, U - t, U - t, U, U + t, U + t, 2 * U])
expected_eig_vals.sort()
max_diff = np.abs(eig_vals - expected_eig_vals).max()
self.assertAlmostEqual(max_diff, 0, 13)
def test_two_sites_numbersubspace(self):
"""
Test 2-site hubbard system with open boundary conditions and 1 atom of each spin species
:return:
"""
U = 20 * (np.random.rand() - 0.5)
t = np.random.rand()
gm = geom.Geometry.createSquareGeometry(2, 1, 0, 0, bc1_open=True, bc2_open=True)
model = ed_fermions.fermions(gm, U, t, ns=np.array([1, 1]))
hamiltonian = model.createH(projector=model.n_projector)
eig_vals, eig_vects = model.diagH(hamiltonian)
expected_eig_vals = np.array([0, 0.5 * (U + np.sqrt(U ** 2 + 16 * t ** 2)), 0.5 * (U - np.sqrt(U ** 2 + 16 * t ** 2)), U])
expected_eig_vals.sort()
max_diff = np.abs(eig_vals - expected_eig_vals).max()
self.assertAlmostEqual(max_diff, 0, 13)
# hubbard with symmetries
def test_c4_symmetry_3by3(self):
"""
Test fourfold rotational symmetry (generated by 90 degree rotation) on a 3x3 Hubbard cluster with open
boundary conditions.
:return:
"""
U = 20 * (np.random.rand() - 0.5)
t = np.random.rand()
gm = geom.Geometry.createSquareGeometry(3, 3, 0, 0, bc1_open=False, bc2_open=False)
model = ed_fermions.fermions(gm, U, t, ns=np.array([1, 1]))
# no symmetry
hamiltonian_full = model.createH(projector=model.n_projector)
eig_vals_full, eig_vects_full = model.diagH(hamiltonian_full)
# use rotationl symmetry
cx, cy = model.geometry.get_center_of_mass()
rot_fn = symm.getRotFn(4, cx=cx, cy=cy)
rot_cycles, max_cycle_len_rot = symm.findSiteCycles(rot_fn, model.geometry)
rot_op = model.n_projector.dot(model.get_xform_op(rot_cycles).dot(model.n_projector.conj().transpose()))
symm_projs, _ = symm.getZnProjectors(rot_op, 4)
eig_vals_sectors = []
for ii, proj in enumerate(symm_projs):
h_sector = model.createH(projector=proj.dot(model.n_projector))
eig_vals_sector, eig_vects_sector = model.diagH(h_sector)
eig_vals_sectors.append(eig_vals_sector)
# why only accurate to 10 decimal places?
eigs_all_sectors = np.sort(np.concatenate(eig_vals_sectors))
max_diff = np.abs(eig_vals_full - eigs_all_sectors).max()
self.assertAlmostEqual(max_diff, 0, 10)
def test_d4_symmetry_3by3(self):
"""
Test D4 symmetry (generated by 90 degree rotation and a reflection) on a 3x3 Hubbard cluster with open
boundary conditions.
:return:
"""
# todo: do all number sectors
U = 20 * (np.random.rand() - 0.5)
t = np.random.rand()
gm = geom.Geometry.createSquareGeometry(3, 3, 0, 0, bc1_open=False, bc2_open=False)
model = ed_fermions.fermions(gm, U, t, ns=np.array([1, 2]))
# no symmetry
hamiltonian_full = model.createH(projector=model.n_projector)
eig_vals_full, eig_vects_full = model.diagH(hamiltonian_full)
# use rotationl symmetry
cx, cy = model.geometry.get_center_of_mass()
rot_fn = symm.getRotFn(4, cx=cx, cy=cy)
rot_cycles, max_cycle_len_rot = symm.findSiteCycles(rot_fn, model.geometry)
rot_op = model.n_projector.dot(model.get_xform_op(rot_cycles).dot(model.n_projector.conj().transpose()))
# reflection symmetry
refl_fn = symm.getReflFn(np.array([[0], [1]]), cx=cx, cy=cy)
refl_cycles, max_cycle_len_refl = symm.findSiteCycles(refl_fn, model.geometry)
refl_op = model.n_projector.dot(model.get_xform_op(refl_cycles).dot(model.n_projector.conj().transpose()))
symm_projs = symm.getD4Projectors(rot_op, refl_op)
eig_vals_sectors = []
for ii, proj in enumerate(symm_projs):
h_sector = model.createH(projector=proj.dot(model.n_projector))
eig_vals_sector, eig_vects_sector = model.diagH(h_sector)
eig_vals_sectors.append(eig_vals_sector)
# why only accurate to 10 decimal places?
eigs_all_sectors = np.sort(np.concatenate(eig_vals_sectors))
max_diff = np.abs(eig_vals_full - eigs_all_sectors).max()
self.assertAlmostEqual(max_diff, 0, 10)
def test_d2_symmetry_3by3(self):
"""
Test D2 symmetry (generated by 180 degree rotation and a reflection) on a 3x3 Hubbard cluster
with open boundary conditions.
:return:
"""
# todo: do all number sectors
U = 20 * (np.random.rand() - 0.5)
t = np.random.rand()
gm = geom.Geometry.createSquareGeometry(3, 3, 0, 0, bc1_open=False, bc2_open=False)
model = ed_fermions.fermions(gm, U, t, ns=np.array([3, 4]), nspecies=2)
# no symmetry
hamiltonian_full = model.createH(projector=model.n_projector)
eig_vals_full, eig_vects_full = model.diagH(hamiltonian_full)
# rotational symmetry
cx, cy = model.geometry.get_center_of_mass()
rot_fn = symm.getRotFn(2, cx=cx, cy=cy)
rot_cycles, max_cycle_len_rot = symm.findSiteCycles(rot_fn, model.geometry)
rot_op_full = model.get_xform_op(rot_cycles)
rot_op = model.n_projector.dot(rot_op_full.dot(model.n_projector.conj().transpose()))
# reflection symmetry
refl_fn = symm.getReflFn(np.array([[0], [1]]), cx=cx, cy=cy)
refl_cycles, max_cycle_len_refl = symm.findSiteCycles(refl_fn, model.geometry)
refl_op_full = model.get_xform_op(refl_cycles)
refl_op = model.n_projector.dot(refl_op_full.dot(model.n_projector.conj().transpose()))
symm_projs = symm.getD2Projectors(rot_op, refl_op)
eig_vals_sectors = []
for ii, proj in enumerate(symm_projs):
h_sector = model.createH(projector=proj.dot(model.n_projector))
eig_vals_sector, eig_vects_sector = model.diagH(h_sector)
eig_vals_sectors.append(eig_vals_sector)
# why only accurate to 10 decimal places?
eigs_all_sectors = np.sort(np.concatenate(eig_vals_sectors))
max_diff = np.abs(eig_vals_full - eigs_all_sectors).max()
self.assertAlmostEqual(max_diff, 0, 10)
    def test_full_symm_3byb3(self):
        """
        Test the combined C4v + translation symmetry decomposition on a 3x3 Hubbard
        cluster with periodic boundary conditions: pooled sector eigenvalues must
        reproduce the full number-projected spectrum.

        NOTE(review): method name '3byb3' appears to be a typo for '3by3'.
        """
        # Random interaction/hopping so the check is not tuned to special values.
        U = 20 * (np.random.rand() - 0.5)
        t = np.random.rand()
        gm = geom.Geometry.createSquareGeometry(3, 3, 0, 0, bc1_open=False, bc2_open=False)
        model = ed_fermions.fermions(gm, U, t, ns=np.array([2, 2]))
        # no symmetry: reference spectrum
        hamiltonian_full = model.createH(projector=model.n_projector)
        eig_vals_full, eig_vects_full = model.diagH(hamiltonian_full)
        # rotational symmetry (90 degrees), projected into the number sector
        cx, cy = model.geometry.get_center_of_mass()
        rot_fn = symm.getRotFn(4, cx=cx, cy=cy)
        rot_cycles, max_cycle_len_rot = symm.findSiteCycles(rot_fn, model.geometry)
        rot_op = model.n_projector.dot(model.get_xform_op(rot_cycles).dot(model.n_projector.conj().transpose()))
        # reflection symmetry
        refl_fn = symm.getReflFn(np.array([[0], [1]]), cx=cx, cy=cy)
        refl_cycles, max_cycle_len_refl = symm.findSiteCycles(refl_fn, model.geometry)
        refl_op = model.n_projector.dot(model.get_xform_op(refl_cycles).dot(model.n_projector.conj().transpose()))
        # translational symmetry along x and y
        tx_fn = symm.getTranslFn(np.array([[1], [0]]))
        tx_cycles, tx_max = symm.findSiteCycles(tx_fn, model.geometry)
        tx_op_full = model.get_xform_op(tx_cycles)
        tx_op = model.n_projector.dot(tx_op_full.dot(model.n_projector.conj().transpose()))
        ty_fn = symm.getTranslFn((np.array([[0], [1]])))
        ty_cycles, ty_max = symm.findSiteCycles(ty_fn, model.geometry)
        ty_op_full = model.get_xform_op(ty_cycles)
        ty_op = model.n_projector.dot(ty_op_full.dot(model.n_projector.conj().transpose()))
        symm_projs = symm.getC4V_and_3byb3(rot_op, refl_op, tx_op, ty_op)
        # diagonalize each symmetry sector and pool the eigenvalues
        eig_vals_sectors = []
        for ii, proj in enumerate(symm_projs):
            h_sector = model.createH(projector=proj.dot(model.n_projector))
            eig_vals_sector, eig_vects_sector = model.diagH(h_sector)
            eig_vals_sectors.append(eig_vals_sector)
        # Agreement only expected to ~1e-10 (finite-precision diagonalization).
        eigs_all_sectors = np.sort(np.concatenate(eig_vals_sectors))
        max_diff = np.abs(eig_vals_full - eigs_all_sectors).max()
        self.assertAlmostEqual(max_diff, 0, 10)
    def test_translation_symmetry_3by3(self):
        """
        Test translational symmetry for a 3x3 Hubbard system with periodic boundary
        conditions: eigenvalues pooled over all (kx, ky) momentum sectors must match
        the spectrum of the full number-projected Hamiltonian.
        :return:
        """
        # Random interaction/hopping so the check is not tuned to special values.
        U = 20 * (np.random.rand() - 0.5)
        t = np.random.rand()
        gm = geom.Geometry.createSquareGeometry(3, 3, 0, 0, bc1_open=False, bc2_open=False)
        hubbard = ed_fermions.fermions(gm, U, t, ns=np.array([1, 1]))
        # no symmetry: reference spectrum
        hamiltonian_full = hubbard.createH(projector=hubbard.n_projector)
        eig_vals_full, eig_vects_full = hubbard.diagH(hamiltonian_full)
        # with symmetry: x-translations projected into the number sector
        xtransl_fn = symm.getTranslFn(np.array([[1], [0]]))
        xtransl_cycles, max_cycle_len_translx = symm.findSiteCycles(xtransl_fn, hubbard.geometry)
        xtransl_op = hubbard.n_projector * hubbard.get_xform_op(xtransl_cycles) * hubbard.n_projector.conj().transpose()
        # y-translations
        ytransl_fn = symm.getTranslFn(np.array([[0], [1]]))
        ytransl_cycles, max_cycle_len_transly = symm.findSiteCycles(ytransl_fn, hubbard.geometry)
        ytransl_op = hubbard.n_projector * hubbard.get_xform_op(ytransl_cycles) * hubbard.n_projector.conj().transpose()
        symm_projs, kxs, kys = symm.get2DTranslationProjectors(xtransl_op, max_cycle_len_translx, ytransl_op,
                                                              max_cycle_len_transly)
        # diagonalize each momentum sector and pool the eigenvalues
        eig_vals_sectors = []
        for ii, proj in enumerate(symm_projs):
            h_sector = hubbard.createH(projector=proj.dot(hubbard.n_projector))
            eig_vals_sector, eig_vects_sector = hubbard.diagH(h_sector)
            eig_vals_sectors.append(eig_vals_sector)
        # Agreement only expected to ~1e-10 (finite-precision diagonalization).
        eigs_all_sectors = np.sort(np.concatenate(eig_vals_sectors))
        max_diff = np.abs(eig_vals_full - eigs_all_sectors).max()
        self.assertAlmostEqual(max_diff, 0, 10)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
# Mapping: Vim-LaTeX macro prefix -> LaTeX environment name it expands to.
calls = {
    'Thm': 'theorem',
    'Proof': 'proof',
    'Soln': 'soln',
    'Solution': 'solution',
    'Cor': 'corollary',
    'Lemma': 'lemma',
    'Prop': 'proposition',
    'Exer': 'exercise',
    'Exercise': 'exercise',
    'Example': 'example',
    'Theorem': 'theorem',
    'Def': 'definition',
    'Fact': 'fact',
    'Problem': 'problem',
    'Remark': 'remark',
    'Prob': 'problem',
    'Claim': 'claim',
    'Answer': 'answer',
    'Hint': 'hint',
    'Question': 'ques',
    'Conj': 'conjecture',
}
# Vimscript template for the generated IMAP() calls.  'Upper'/'lower' and
# 'env' are placeholders substituted per mapping below.
generic_string = r'''call IMAP('Upper::', "\\begin{env}\<CR><++>\<CR>\\end{env}<++>", 'tex')
call IMAP('Upper[]::', "\\begin{env}[<++>]\<CR><++>\<CR>\\end{env}<++>", 'tex')
call IMAP('lower::', "\\begin{env*}\<CR><++>\<CR>\\end{env*}<++>", 'tex')
call IMAP('lower[]::', "\\begin{env*}[<++>]\<CR><++>\<CR>\\end{env*}<++>", 'tex')'''
# Emit one IMAP block per macro.  Ported to Python 3: dict.iteritems() ->
# dict.items(), print statement -> print() function.
for key, val in calls.items():
    final = generic_string
    final = final.replace('Upper', key)
    final = final.replace('lower', key.lower())
    final = final.replace('env', val)
    print(final)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Vadim Khitrin <me at vkhitrin.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# NOTE: fixed shebang typo ('/usr/vin/env' -> '/usr/bin/env').
from __future__ import absolute_import, division, print_function
__metaclass__ = type

# Ansible module metadata consumed by ansible-doc tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = """
module: virt_customize_users
short_description: Manages users in guest images
version_added: "2.4"
description:
- Manages users in guest images
options:
image:
required: True
description: image path on filesystem.
name:
required: True
description: name of user
password:
required: False
description: user's password
state:
required: True
description: action to be performed
choices:
- present
- absent
automount:
required: False
description: Whether to perform auto mount of mountpoints inside guest disk image (REQUIRED for this module)
default: True
selinux_relabel:
required: False
description: Whether to perform SELinux contect relabeling during invocation
network:
required: False
description: Whether to enable network for appliance
default: True
---
requirements:
- "guestfs"
- "python >= 2.7.5"
author: Vadim Khitrin (@vkhitrin)
"""
EXAMPLES = """
---
- name: Creates a user
virt_customize_users:
image: /tmp/rhel7-5.qcow2
user: test_user
password: test_password
state: present
- name: Change password to an existing user
virt_customize_users:
image: /tmp/rhel7-5.qcow2
user: root
password: root_password
state: present
- name: Delete a user
virt_customize_users:
image: /tmp/rhel7-5.qcow2
user: root
password: root_password
state: absent
"""
RETURN = """
- msg:
type: string
when: failure
description: Contains the error message (may include python exceptions)
example: "cat: /fgdfgdfg/dfgdfg: No such file or directory"
- results:
type: array
when: success
description: Contains the module successful execution results
example: [
"test_user is present"
]
"""
from ansible.module_utils.virt_customize import guest
from ansible.module_utils.basic import AnsibleModule
import re
def users(guest, module):
    """Create, update, or delete a user inside the guest image.

    :param guest: bootstrapped guest handle exposing ``sh_lines`` to run
        shell commands inside the image.
    :param module: AnsibleModule instance carrying the validated parameters.
    :returns: tuple ``(results, err)`` — ``results`` is the dict handed to
        exit_json/fail_json; ``err`` is True when any shell command failed.

    NOTE(review): all user handling is gated on ``automount``; when it is
    False nothing is executed, yet the function still reports changed=True
    and "<user> is <state>" below — confirm this is intended.
    NOTE(review): user_name/user_password are interpolated into shell
    commands unquoted; a crafted value could inject shell syntax.
    """
    state = module.params['state']
    user_name = module.params['name']
    user_password = module.params['password']
    # Result skeleton in the shape Ansible expects.
    results = {
        'changed': False,
        'failed': False,
        'results': []
    }
    err = False
    if module.params['automount']:
        # Probe for the user; 'id -u' raises when the user does not exist.
        try:
            guest.sh_lines('id -u {}'.format(user_name))
            user_exists = True
        except Exception:
            user_exists = False
        if state == 'present':
            if user_exists:
                # User already present: just (re)set the password.
                try:
                    guest.sh_lines('echo {u}:{p} | chpasswd'.format(u=user_name,
                                                                    p=user_password))
                except Exception as e:
                    err = True
                    results['failed'] = True
                    results['msg'] = str(e)
            else:
                # Create the user, then set its password.
                try:
                    guest.sh_lines('useradd {user}'.format(user=user_name))
                    guest.sh_lines('echo {u}:{p} | chpasswd'.format(u=user_name,
                                                                    p=user_password))
                except Exception as e:
                    err = True
                    results['failed'] = True
                    results['msg'] = str(e)
        elif state == 'absent':
            # Only remove the user when it actually exists (delete is a no-op otherwise).
            if user_exists:
                try:
                    guest.sh_lines('userdel {user}'.format(user=user_name))
                except Exception as e:
                    err = True
                    results['failed'] = True
                    results['msg'] = str(e)
    if not err:
        results['changed'] = True
        results['results'].append('{u} is {s}'.format(u=user_name, s=state))
    return results, err
def main():
    """Module entry point.

    Declares the module interface, bootstraps the guest image, applies the
    requested user change, closes the guest handle, and reports the outcome
    back to Ansible.
    """
    required_together_args = [['name', 'state']]
    module = AnsibleModule(
        argument_spec={
            'image': {'required': True, 'type': 'str'},
            'automount': {'required': False, 'type': 'bool', 'default': True},
            'network': {'required': False, 'type': 'bool', 'default': True},
            'selinux_relabel': {'required': False, 'type': 'bool', 'default': False},
            'name': {'required': True, 'type': 'str'},
            # TODO vkhitrin: state=absent and no password support
            'password': {'required': True, 'type': 'str', 'no_log': True},
            'state': {'required': True, 'choices': ['present', 'absent']},
            'debug': {'required': False, 'type': 'bool', 'default': False},
            'force': {'required': False, 'type': 'bool', 'default': False},
        },
        required_together=required_together_args,
        supports_check_mode=True
    )
    g = guest(module)
    instance = g.bootstrap()
    results, err = users(instance, module)
    g.close()
    if err:
        module.fail_json(**results)
    module.exit_json(**results)
if __name__ == '__main__':
    main()
|
# from conftest import *
def test_run_opt_load_results(default_model):
    """
    This test checks that we can serialise optimiser results to YAML and reload,
    getting the same information after loading.
    It also checks that resetting the seed and re-running gives the same results.

    :param default_model: pytest fixture yielding (facility, product, steps) —
        presumably defined in conftest.py; confirm against the test suite.
    """
    facility, product, steps = default_model
    # Set up the optimiser
    from biopharma import optimisation as opt
    optimiser = opt.Optimiser(facility)
    # Specify the variables to optimise: one binary flag and one bounded
    # integer, both living on the 'test_step' component.
    optimiser.add_variable(
        gen=opt.gen.Binary(), component=opt.sel.step('test_step'), item='bool_param')
    optimiser.add_variable(gen=opt.gen.RangeGenerator(0, 10),
                           component=opt.sel.step('test_step'), item='int_param')
    # Specify the objective(s): minimise cost-of-goods of the first product.
    optimiser.add_objective(component=opt.sel.product(0), item='cogs', minimise=True)
    # Run a deliberately tiny optimisation so the test stays fast.
    optimiser.parameters['populationSize'] = 2
    optimiser.parameters['maxGenerations'] = 1
    optimiser.run()
    original_pop = optimiser.outputs['finalPopulation']
    original_best_fitness = optimiser.outputs['bestObjectiveValues']
    # Save results and re-load
    results = optimiser.save_results()
    optimiser.load_results(results)
    # Check loaded individuals are distinct objects but compare equal
    for orig, loaded in zip(original_pop, optimiser.outputs['finalPopulation']):
        assert orig is not loaded
        assert orig == loaded
    assert original_best_fitness == optimiser.outputs['bestObjectiveValues']
    # Reset the seed and re-run: optimisation must be fully deterministic
    optimiser.set_seed(optimiser.outputs['seed'])
    optimiser.run()
    # Check for equivalent individuals again
    for orig, rerun in zip(original_pop, optimiser.outputs['finalPopulation']):
        assert orig is not rerun
        assert orig == rerun
    # And as an extra sanity check, verify we get the same fitness for the best solution
    assert original_best_fitness == optimiser.outputs['bestObjectiveValues']
|
#!/usr/bin/env python
import glob
import os
import matplotlib.pyplot as plt
def main():
    """Plot the percentage of true top-k flows found per counter budget.

    Scans ~/logs for files whose names contain '-Topk-'.  Each line of such a
    file is expected to look like "<k> <counters> ...: <found>".
    """
    res_per_k = {}
    log_glob = os.path.expanduser('~/logs/') + '*-Topk-*'
    for fn in glob.glob(log_glob):
        if not os.path.isfile(fn):
            continue
        with open(fn, "r") as f:
            lines = f.readlines()
        for line in lines:
            parts = line.split(':')
            head_fields = parts[0].split(' ')
            found = int(parts[1])
            k = int(head_fields[0])
            counters = int(head_fields[1])
            # First result for this k creates its dict; later ones extend it.
            res_per_k.setdefault(k, {})[counters] = found
    for k in res_per_k.keys():
        # One curve per k, ordered by counter budget.
        budgets = sorted(res_per_k[k], key=lambda x: x)
        hit_rates = [100.0 * (res_per_k[k][b]) / k for b in budgets]
        plt.plot(budgets, hit_rates, marker=(4, k % 4, 360 / k), label='k=' + str(k))
    plt.ylim((0, 100))
    plt.ylabel('percentage')
    plt.xlabel('counters')
    plt.title('Percentage of found top-k flows')
    plt.legend(numpoints=1)
    plt.show()
if __name__ == '__main__':
    main()
|
import environ
from .base import *
# Read .env if exists
env = environ.Env()
env.read_env(os.path.join(BASE_DIR, '.env'))
#####################
# Security settings #
#####################
DEBUG = True
SECRET_KEY = env('SECRET_KEY')
# ALLOWED_HOSTS = env('ALLOWED_HOSTS')
ALLOWED_HOSTS = ['127.0.0.1', 'localhost']
############
# Database #
############
DATABASES = {
'default': env.db()
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
###########
# Logging #
###########
"""
LOGGING = {
# バージョンは「1」固定
'version': 1,
# 既存のログ設定を無効化しない
'disable_existing_loggers': False,
# ログフォーマット
'formatters': {
# 開発用
'develop': {
'format': '%(asctime)s [%(levelname)s] %(pathname)s:%(lineno)d'
'%(message)s'
},
},
# ハンドラ
'handlers': {
# コンソール出力用ハンドラ
'file': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'develop',
},
},
# ロガー
'loggers': {
# 自作アプリケーション全般のログを拾うロガー
'': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
# Django本体が出すログ全般を拾うロガー
'django': {
'handlers': ['console'],
'level': 'INFO',
'propagate': False,
},
'django.db.backends': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
}
},
}
"""
##################
# Email settings #
##################
EMAIL_CONFIG = env.email_url('EMAIL_URL')
vars().update(EMAIL_CONFIG)
###################
# Stripe settings #
###################
STRIPE_API_KEY = env('STRIPE_API_KEY')
STRIPE_PUBLISHABLE_KEY = env('STRIPE_PUBLISHABLE_KEY')
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import Script
# Cluster configuration supplied by the Ambari agent for this command.
config = Script.get_config()
pxf_service_name = "pxf-service"
stack_name = str(config["hostLevelParams"]["stack_name"])
# Users and Groups
pxf_user = "pxf"
pxf_group = pxf_user
hdfs_superuser_group = config["configurations"]["hdfs-site"]["dfs.permissions.superusergroup"]
user_group = config["configurations"]["cluster-env"]["user_group"]
tomcat_group = "tomcat"
# Directories
pxf_conf_dir = "/etc/pxf/conf"
pxf_instance_dir = "/var/pxf"
# Java home path (may be absent from the agent payload, hence the guard)
java_home = config["hostLevelParams"]["java_home"] if "java_home" in config["hostLevelParams"] else None
# Timeouts
default_exec_timeout = 600  # seconds allowed for Execute() resources
# security related
security_enabled = config['configurations']['cluster-env']['security_enabled']
realm_name = config['configurations']['kerberos-env']['realm']
|
from random import choice
from copy import deepcopy
from collections import defaultdict
#import pisqpipe as pp
def get_neighbors(location, board):
    """Return the sub-board of cells within distance 2 of *location*.

    The result is a rectangular window (list of row slices) clamped to the
    board edges, not a flat list of cells.
    """
    x, y = location
    row_lo = max(x - 2, 0)
    row_hi = min(x + 2, len(board) - 1)      # inclusive last row
    col_lo = max(y - 2, 0)
    col_hi = min(y + 2, len(board[0]) - 1)   # inclusive last column
    return [row[col_lo:col_hi + 1] for row in board[row_lo:row_hi + 1]]
def get_around_info(board):
    """Build, for every cell, the four line strings through that cell.

    Returns a dict mapping (i, j) -> [row, column, main diagonal,
    anti-diagonal], where each entry is the string of cell values within
    distance 4 of the cell along that line.  A '*' is prepended/appended
    whenever the 4-cell window runs off a board edge, so pattern matchers
    can treat the edge like a blocking stone.

    Bug fix: the original clamped the backward start of each diagonal per
    axis independently (start_i = max(0, i-4), start_j = max(0, j-4)), so
    near an edge the walk could begin OFF the diagonal through (i, j) and
    return a line not containing the cell at all.  The start is now clamped
    by both axes jointly so the string always passes through (i, j).
    """
    width = len(board)
    height = len(board[0])
    dic = {}
    for i in range(width):
        for j in range(height):
            location = (i, j)  # also the key
            res = []
            # row
            start_j = max(j - 4, 0)
            end_j = min(height, j + 5)
            s = ''.join(str(board[i][col]) for col in range(start_j, end_j))
            if j - 4 < 0:
                s = '*' + s  # boundary marker
            if j + 4 >= height:
                s = s + '*'
            res.append(s)
            # column
            start_i = max(0, i - 4)
            end_i = min(width, i + 5)
            s = ''.join(str(board[row][j]) for row in range(start_i, end_i))
            if i - 4 < 0:
                s = '*' + s
            if i + 4 >= width:
                s = s + '*'
            res.append(s)
            # main diagonal (x and y both increasing); back-step clamped by
            # both axes so the walk starts on the diagonal through (i, j)
            back = min(4, i, j)
            x, y = i - back, j - back
            cells = []
            while x < min(width, i + 5) and y < min(height, j + 5):
                cells.append(str(board[x][y]))
                x += 1
                y += 1
            s = ''.join(cells)
            if i - 4 < 0 or j - 4 < 0:
                s = '*' + s
            if i + 4 >= width or j + 4 >= height:
                s = s + '*'
            res.append(s)
            # anti-diagonal (x increasing, y decreasing); same joint clamp
            back = min(4, i, height - 1 - j)
            x, y = i - back, j + back
            cells = []
            while x < min(width, i + 5) and y > max(j - 5, -1):
                cells.append(str(board[x][y]))
                x += 1
                y -= 1
            s = ''.join(cells)
            if i - 4 < 0 or j + 4 >= height:
                s = '*' + s
            if i + 4 >= width or j - 4 < 0:
                s = s + '*'
            res.append(s)
            dic[location] = res
    return dic
def get_point_info(location, board):
    """Return the four line strings (row, column, main diagonal,
    anti-diagonal) through *location*.

    Each string holds the cell values within distance 4 of the location
    along that line; '*' marks a board edge hit by the 4-cell window.

    Bug fix: the original clamped the backward start of each diagonal per
    axis independently (start_i = max(0, i-4), start_j = max(0, j-4)), so
    near an edge the walk could begin OFF the diagonal through (i, j) and
    return a line not containing the cell at all.  The start is now clamped
    by both axes jointly so the string always passes through (i, j).
    """
    width = len(board)
    height = len(board[0])
    i, j = location
    res = []
    # row
    start_j = max(j - 4, 0)
    end_j = min(height, j + 5)
    s = ''.join(str(board[i][col]) for col in range(start_j, end_j))
    if j - 4 < 0:
        s = '*' + s  # boundary marker
    if j + 4 >= height:
        s = s + '*'
    res.append(s)
    # column
    start_i = max(0, i - 4)
    end_i = min(width, i + 5)
    s = ''.join(str(board[row][j]) for row in range(start_i, end_i))
    if i - 4 < 0:
        s = '*' + s
    if i + 4 >= width:
        s = s + '*'
    res.append(s)
    # main diagonal (x and y both increasing)
    back = min(4, i, j)
    x, y = i - back, j - back
    cells = []
    while x < min(width, i + 5) and y < min(height, j + 5):
        cells.append(str(board[x][y]))
        x += 1
        y += 1
    s = ''.join(cells)
    if i - 4 < 0 or j - 4 < 0:
        s = '*' + s
    if i + 4 >= width or j + 4 >= height:
        s = s + '*'
    res.append(s)
    # anti-diagonal (x increasing, y decreasing)
    back = min(4, i, height - 1 - j)
    x, y = i - back, j + back
    cells = []
    while x < min(width, i + 5) and y > max(j - 5, -1):
        cells.append(str(board[x][y]))
        x += 1
        y -= 1
    s = ''.join(cells)
    if i - 4 < 0 or j + 4 >= height:
        s = '*' + s
    if i + 4 >= width or j - 4 < 0:
        s = s + '*'
    res.append(s)
    return res
# Threat categories recognized by the pattern classifiers below, ordered
# roughly from strongest (five in a row) to weakest (no threat).
possible_states = ['win5', 'alive4', 'lian-rush4', 'tiao-rush4', 'lian-alive3', 'tiao-alive3', \
                   'lian-sleep3', 'tiao-sleep3', 'te-sleep3', 'jia-alive3', 'alive2', 'sleep2', 'alive1', 'nothreat']
def my_state_string_to_dic(string_lis):
    """Classify each of my line strings into a threat state and count them.

    *string_lis* holds line strings where '1' is my stone, '2' an opponent
    stone, '0' empty and '*' a board edge.  Each string is assigned to the
    FIRST matching pattern group below (order encodes precedence, strongest
    threat first), so do not reorder the if-chains.
    """
    res = defaultdict(int) # key is the possible state
    for string in string_lis:
        # five in a row: immediate win
        if '11111' in string:
            res['win5'] += 1
            continue
        # open four (both ends free)
        if '011110' in string:
            res['alive4'] += 1
            continue
        # blocked four, contiguous ('2' or edge on one side)
        if '211110' in string or '011112' in string \
            or '*11110' in string or '01111*' in string:
            res['lian-rush4'] += 1
            continue
        # four with a gap
        if '11101' in string or '10111' in string or '11011' in string:
            res['tiao-rush4'] += 1
            continue
        # open three, contiguous
        if '001110' in string or '011100' in string:
            res['lian-alive3'] += 1
            continue
        # open three with a gap
        if '011010' in string or '010110' in string:
            res['tiao-alive3'] += 1
            continue
        # blocked three, contiguous
        if '211100' in string or '001112' in string \
            or '*11100' in string or '00111*' in string:
            res['lian-sleep3'] += 1
            continue
        # blocked three with a gap
        if '211010' in string or '010112' in string \
            or '*11010' in string or '01011*' in string\
            or '210110' in string or '011012' in string\
            or '*10110' in string or '01101*' in string:
            res['tiao-sleep3'] += 1
            continue
        # special sleeping threes (double gap)
        if '11001' in string or '10011' in string or '10101' in string:
            res['te-sleep3'] += 1
            continue
        # "false" open three: open three whose extensions are both blocked
        if '2011102' in string or '*011102' in string\
            or '201110*' in string or '*01110*' in string:
            res['jia-alive3'] += 1
            continue
        # open two
        if '001100' in string or '011000' in string\
            or '000110' in string or '001010' in string\
            or '010100' in string or '010010' in string:
            res['alive2'] += 1
            continue
        # blocked two (all one-side-blocked / gapped variants)
        if '211000' in string or '000112' in string\
            or '*11000' in string or '00011*' in string\
            or '210100' in string or '001012' in string\
            or '*10100' in string or '00101*' in string\
            or '210010' in string or '010012' in string\
            or '*10010' in string or '01001*' in string\
            or '10001' in string or '2010102' in string\
            or '*01010*' in string or '201010*' in string\
            or '*010102' in string or '2011002' in string\
            or '2001102' in string or '*011002' in string\
            or '200110*' in string or '201100*' in string\
            or '*001102' in string:
            res['sleep2'] += 1
            continue
        # lone stone with room
        if '010' in string:
            res['alive1'] += 1
            continue
        res['nothreat'] += 1
    return res
def my_score(res):
    """Collapse my threat-state counts into one positional score.

    *res* maps state names (see ``possible_states``) to counts.  Every state
    in the weight table is looked up, so *res* must supply a value for each
    key (a ``defaultdict(int)`` works naturally).
    """
    weights = (
        ('win5', 1100000), ('alive4', 6000),
        ('lian-rush4', 5100), ('tiao-rush4', 5000),
        ('lian-alive3', 4500), ('tiao-alive3', 4500),
        ('lian-sleep3', 3500), ('tiao-sleep3', 3000),
        ('te-sleep3', 2800), ('jia-alive3', 2800),
        ('alive2', 2500), ('sleep2', 2500),
        ('alive1', 1900), ('nothreat', 1000),
    )
    return sum(weight * res[state] for state, weight in weights)
def my_point_score(location, board):
    """Score the position for the AI if it placed a stone at *location*.

    Returns 0 when the cell is occupied.  Otherwise plays a hypothetical
    stone (value 1) on a copy of the board, extracts the four line strings
    through the cell, classifies them into threat states, and converts the
    state counts into a single numeric score.
    """
    row, col = location
    if board[row][col] != 0:
        # occupied squares can never be played
        return 0
    hypothetical = deepcopy(board)
    hypothetical[row][col] = 1
    lines = get_point_info(location, hypothetical)
    return my_score(my_state_string_to_dic(lines))
def my_score_matrix(board):
    """Return the matrix of my_point_score values over the whole board.

    Empty cells get their hypothetical-placement score; occupied cells get 0.

    Performance fix: the original also computed ``get_around_info(board)``
    here and never used the result — that dead (and expensive) call has been
    removed.
    """
    width = len(board)
    height = len(board[0])
    return [
        [my_point_score((i, j), board) if board[i][j] == 0 else 0
         for j in range(height)]
        for i in range(width)
    ]
def opponent_state_string_to_dic(string_lis):
    """Classify each opponent line string into a threat state and count them.

    Mirror of ``my_state_string_to_dic`` with the stone values swapped:
    '2' is the opponent's stone, '1' mine, '0' empty and '*' a board edge.
    Each string is assigned to the FIRST matching group (order encodes
    precedence, strongest threat first) — do not reorder the if-chains.
    """
    res = defaultdict(int) # key is the possible state
    for string in string_lis:
        # five in a row: opponent wins
        if '22222' in string:
            res['win5'] += 1
            continue
        # open four
        if '022220' in string:
            res['alive4'] += 1
            continue
        # blocked four, contiguous
        if '122220' in string or '022221' in string\
            or '*22220' in string or '02222*' in string:
            res['lian-rush4'] += 1
            continue
        # four with a gap
        if '22202' in string or '20222' in string or '22022' in string:
            res['tiao-rush4'] += 1
            continue
        # open three, contiguous
        if '002220' in string or '022200' in string:
            res['lian-alive3'] += 1
            continue
        # open three with a gap
        if '022020' in string or '020220' in string:
            res['tiao-alive3'] += 1
            continue
        # blocked three, contiguous
        if '122200' in string or '002221' in string\
            or '*22200' in string or '00222*' in string:
            res['lian-sleep3'] += 1
            continue
        # blocked three with a gap
        if '122020' in string or '020221' in string\
            or '*22020' in string or '02022*' in string\
            or '120220' in string or '022021' in string\
            or '*20220' in string or '02202*' in string:
            res['tiao-sleep3'] += 1
            continue
        # special sleeping threes (double gap)
        if '22002' in string or '20022' in string or '20202' in string:
            res['te-sleep3'] += 1
            continue
        # "false" open three: extensions on both sides blocked
        if '1022201' in string or '*022201' in string\
            or '102220*' in string or '*02220*' in string:
            res['jia-alive3'] += 1
            continue
        # open two
        if '002200' in string or '022000' in string\
            or '000220' in string or '002020' in string\
            or '020200' in string or '020020' in string:
            res['alive2'] += 1
            continue
        # blocked two (all one-side-blocked / gapped variants)
        if '122000' in string or '000221' in string\
            or '*22000' in string or '00022*' in string\
            or '120200' in string or '002021' in string\
            or '*20200' in string or '00202*' in string\
            or '120020' in string or '020021' in string\
            or '*20020' in string or '02002*' in string\
            or '20002' in string or '1020201' in string\
            or '*02020*' in string or '102020*' in string\
            or '*020201' in string or '1022001' in string\
            or '1002201' in string or '*022001' in string\
            or '100220*' in string or '102200*' in string\
            or '*002201' in string:
            res['sleep2'] += 1
            continue
        # lone stone with room
        if '020' in string:
            res['alive1'] += 1
            continue
        res['nothreat'] += 1
    return res
def opponent_score(res):
    """Collapse opponent threat-state counts into one positional score.

    Weighted slightly higher than ``my_score`` so that, at equal threat
    level, blocking the opponent is preferred.  Every state in the table is
    looked up, so *res* must supply a value for each key (a
    ``defaultdict(int)`` works naturally).
    """
    weights = (
        ('win5', 1000000), ('alive4', 7000),
        ('lian-rush4', 5300), ('tiao-rush4', 5200),
        ('lian-alive3', 4700), ('tiao-alive3', 4700),
        ('lian-sleep3', 3700), ('tiao-sleep3', 3200),
        ('te-sleep3', 3000), ('jia-alive3', 3000),
        ('alive2', 2600), ('sleep2', 2600),
        ('alive1', 1900), ('nothreat', 1000),
    )
    return sum(weight * res[state] for state, weight in weights)
def opponent_point_score(location, board):
    """Score the position for the opponent if they placed a stone at *location*.

    Returns 0 when the cell is occupied.  Otherwise plays a hypothetical
    opponent stone (value 2) on a copy of the board, extracts the four line
    strings through the cell, classifies them into threat states, and
    converts the counts into a numeric score.
    """
    row, col = location
    if board[row][col] != 0:
        # occupied squares can never be played
        return 0
    hypothetical = deepcopy(board)
    hypothetical[row][col] = 2
    lines = get_point_info(location, hypothetical)
    return opponent_score(opponent_state_string_to_dic(lines))
def opponent_score_matrix(board):
    """Return the matrix of opponent_point_score values over the whole board.

    Empty cells get their hypothetical-placement score; occupied cells get 0.

    Performance fix: the original also computed ``get_around_info(board)``
    here and never used the result — that dead (and expensive) call has been
    removed.
    """
    width = len(board)
    height = len(board[0])
    return [
        [opponent_point_score((i, j), board) if board[i][j] == 0 else 0
         for j in range(height)]
        for i in range(width)
    ]
def my_move(board):
    """Pick the next move for the AI (stones: 1 = mine, 2 = opponent, 0 = empty).

    Opening: play the center on an empty board; answer a lone opponent stone
    with a random orthogonal neighbor.  Otherwise score every empty cell for
    both sides and either attack (my best score wins) or defend (block the
    opponent's best cell), breaking ties by the other side's score.

    NOTE(review): the tmp2 == 2 branch assumes the only non-zero cell is a
    single opponent stone of value 2 — confirm callers guarantee this.
    """
    width = len(board)
    height = len(board[0])
    # start
    # Board occupancy check via value sums (0 = empty, 2 = one opponent stone).
    tmp1 = [ sum(lis) for lis in board ]
    tmp2 = sum (tmp1)
    if tmp2 == 0:
        # my first
        x = int(width/2)
        y = int(height/2)
        return (x, y)
    if tmp2 == 2:
        # opponent first
        m = tmp1.index(2)
        row = board[m]
        n = row.index(2)
        lis = [ (m-1,n), (m+1,n), (m,n-1), (m,n+1)] # no diagonal
        feasible = []
        for pair in lis:
            x, y = pair
            if 0<= x < width and 0<= y < height:
                feasible.append( (x, y) )
        return choice(feasible)
    # score
    my_matrix = my_score_matrix(board) # heavy
    opponent_matrix = opponent_score_matrix(board) # heavy
    # First pass: find the best score each side could reach.
    my_max = -1
    oppo_max = -1
    for i in range(width):
        for j in range(height):
            # my
            if my_matrix[i][j] >= my_max:
                my_max = my_matrix[i][j]
            # opponent
            if opponent_matrix[i][j] >= oppo_max:
                oppo_max = opponent_matrix[i][j]
    # Second pass: collect every location attaining those maxima.
    my_max_list = [] # locations with max score
    oppo_max_list = []
    for i in range(width):
        for j in range(height):
            # my
            if my_matrix[i][j] == my_max:
                my_max_list.append( (i, j) ) # possible locations
            #opponent
            if opponent_matrix[i][j] == oppo_max:
                oppo_max_list.append( (i, j) )
    if my_max > oppo_max:
        # attack
        if len(my_max_list) == 1:
            return my_max_list[0]
        else:
            # my max, opponent max: among my best cells prefer the one the
            # opponent also values most (denies them the square)
            lis = [ (opponent_matrix[pair[0]][pair[1]], pair) for pair in my_max_list ] # pair is location
            lis.sort(reverse=True)
            return lis[0][1] # my max, opponent max location
    else:
        # defence
        if len(oppo_max_list) == 1:
            return oppo_max_list[0]
        else:
            # opponent max, my max: among blocking cells prefer the one that
            # is also best for me
            lis = [ (my_matrix[pair[0]][pair[1]], pair) for pair in oppo_max_list ]
            lis.sort(reverse=True)
            return lis[0][1]
|
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from xgboost.sklearn import XGBClassifier
import xgboost as xgb
def icu(train):
    """Train an XGBoost classifier predicting ICU admission from patient data.

    Parameters
    ----------
    train : str
        Path to the Excel file with the raw per-visit patient rows
        (must contain 'PATIENT_VISIT_IDENTIFIER' and 'ICU' columns).

    Side effects: prints the hold-out accuracy and writes the predictions
    to 'prediction.csv' in the working directory.
    """
    df = pd.read_excel(train)
    # Fill missing vitals forward within each patient visit, then backward
    # over the whole frame (replaces the deprecated fillna(method=...) calls).
    df_new = df.groupby('PATIENT_VISIT_IDENTIFIER', as_index=False)\
        .ffill()\
        .bfill()
    # added as PATIENT_VISIT_IDENTIFIER removed during grouping
    df_new['PATIENT_VISIT_IDENTIFIER'] = df.PATIENT_VISIT_IDENTIFIER
    # Label-encode every text column.  Bug fix: the original probed row `j`
    # and incremented j once per column, so each column was tested at a
    # different row (and could index past the end of the frame); testing the
    # column dtype is both correct and NaN-safe.
    for col in df_new.columns:
        if df_new[col].dtype == object:
            codes, definitions = pd.factorize(df_new[col])
            df_new[col] = codes
    y = df_new["ICU"]
    X = df_new.drop(['ICU'], axis=1)
    # Stratify on the label so both splits keep the ICU class balance.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, stratify=y)
    xgb1 = XGBClassifier(learning_rate=0.1,
                         n_estimators=1000,
                         max_depth=5,
                         min_child_weight=1,
                         gamma=0, eval_metric='mlogloss',
                         subsample=0.8,
                         colsample_bytree=0.8,
                         nthread=4,
                         scale_pos_weight=1,
                         seed=27, use_label_encoder=False)
    xgb1.fit(X_train, y_train)
    preds = xgb1.predict(X_test)
    accuracy = accuracy_score(y_test, preds)
    print("Accuracy: %.2f%%" % (accuracy * 100.0))
    pd.DataFrame(preds, columns=['predictions']).to_csv('prediction.csv')
if __name__ == '__main__':
    # Prompt only when run as a script, so importing this module has no
    # side effects (the original prompted unconditionally at import time).
    print("Enter the path of the file")
    s = input()
    icu(s)
from typing import Dict
from torch import nn, Tensor
from .utils import set_hessian, solve
from .. import properties as p
class NewtonDirection(nn.Module):
    """Store the (negated) Newton step direction in the molecule dict.

    Populates the Hessian via ``set_hessian``, solves the linear system of
    gradient and Hessian via ``solve``, and writes the negated solution
    under ``p.gen_stp_dir``.  (Exact semantics of ``solve`` come from
    ``.utils`` — assumed Newton system solve; TODO confirm.)
    """

    def forward(self, mol: Dict[str, Tensor]):
        mol = set_hessian(mol)
        newton_step = solve(mol[p.gen_grd], mol[p.gen_hes])
        mol[p.gen_stp_dir] = -newton_step
        return mol
|
import os
import random
import numpy as np
from nltk import word_tokenize, pos_tag
import nltk
import csv
import string
import collections as ct
import pandas as pd
from CRF_ReceiptGetter import ReceiptGetter
# How many receipts to sample from each annotated dataset.
annai_data_count = 0
mega_data_count = 0
north_data_count = 50
# The Annai dataset is currently disabled (annai_data_count == 0); re-enable
# by loading Annotated/annai_featured.csv the same way as the sets below.
north_data = pd.read_csv("Annotated/north_featured.csv")
north_getter = ReceiptGetter(north_data)
north_receipts = north_getter.receipts
random.shuffle(north_receipts)
sub_north_receipts = north_receipts[0:north_data_count]
print(len(sub_north_receipts))
mega_data = pd.read_csv("Annotated/mega_featured.csv")
mega_getter = ReceiptGetter(mega_data)
mega_receipts = mega_getter.receipts
random.shuffle(mega_receipts)
sub_mega_receipts = mega_receipts[0:mega_data_count]
print(len(sub_mega_receipts))
i = 0
with open('/home/thumilan/Desktop/LSTM-sample/Annotated/TreeData/data_not_numbered.csv', 'a') as csvFile:
    # Create the writer once (the original rebuilt it for every single row).
    writer = csv.writer(csvFile)
    for item in sub_mega_receipts:
        for data in item:
            writer.writerow(data)
        i = i + 1
        print(i)
    for item in sub_north_receipts:
        for data in item:
            writer.writerow(data)
        i = i + 1
        print(i)
# Removed the redundant csvFile.close(): the `with` block already closed it.
|
import math
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
from scipy.stats import t
from scipy.stats import norm
import lib.least_squares as ls
def regression_4_by_4(x, y_0, y_1 = None, y_2 = None, y_3 = None, cols = 1,
                      rows = 1, suptitle = None, subtitle_0 = None,
                      subtitle_1 = None, subtitle_2 = None, subtitle_3 = None,
                      xlabels = None, ylabels = None, xmin = None, xmax = None,
                      xticks = None):
    """Plot up to four regression panels (scatter, fit line, CI band, PI lines).

    Parameters
    ----------
    x : sequence
        Shared x values for every panel.
    y_0..y_3 : sequences, optional
        Per-panel y values. Panels 2 and 3 are only drawn when ``rows == 2``;
        any panel whose y is None is skipped instead of crashing.
    cols, rows : int
        Subplot grid shape (the original layout expects a 2x2 grid when
        four series are given).
    suptitle, subtitle_0..subtitle_3, xlabels, ylabels : str, optional
        Figure/panel titles and axis labels; defaults are used when not strings.
    xmin, xmax : optional
        x-axis limits, applied when either is given.
    xticks : optional
        Explicit x tick positions.

    Notes
    -----
    Relies on ``ls.least_squares`` returning a dict with keys 'l', 'upi',
    'lpi', 'uci', 'lci', 'm', 'r_sq' and 'p', where the CI entries are
    indexable by 'Data Year' — TODO confirm against lib.least_squares.
    """
    def _panel(y, index, subtitle):
        """Draw one regression panel at subplot position *index*."""
        if y is None:
            return
        # Compute the fit once per panel (the original recomputed it ~9 times).
        fit = ls.least_squares(x, y)
        plt.subplot(rows, cols, index)
        # Underscore at the start of label text suppresses the legend entry.
        plt.scatter(x, y, label = '_x')
        plt.plot(x, fit.get('upi'), 'r--', label = '_x')
        plt.plot(x, fit.get('l'), 'b--', label = '_x')
        plt.plot(x, fit.get('lpi'), 'r--', label = '_x')
        plt.fill_between(x, fit.get('uci')['Data Year'],
                         fit.get('lci')['Data Year'],
                         alpha = 0.25, color = '#99caff')
        plt.grid()
        # BUGFIX: the original tested "xmin or xmax != None", which parses as
        # "xmin or (xmax != None)" and misbehaves when xmin is 0 or None.
        if xmin is not None or xmax is not None:
            plt.xlim(xmin, xmax)
        if xticks is not None:
            plt.xticks(xticks)
        plt.title(subtitle if isinstance(subtitle, str) else 'Observations')
        plt.ylabel(ylabels if isinstance(ylabels, str) else 'Obs.')
        plt.xlabel(xlabels if isinstance(xlabels, str) else 'Index')
        # The empty plots below hold custom text entries for the legend.
        plt.plot([], [], ' ', label = 'Slope: '
                 + fit.get('m').astype(str))
        plt.plot([], [], ' ', label = 'R-sq: '
                 + fit.get('r_sq').astype(str))
        if fit.get('p') < 0.0001:
            p_value = 'p value: <0.0001'
        else:
            p_value = 'p value: ' + fit.get('p').astype(str)
        plt.plot([], [], ' ', label = p_value)
        plt.legend(loc = 2)

    plt.figure(figsize = (15, rows * 5))
    if isinstance(suptitle, str):
        plt.suptitle('Regression Analysis \n' + suptitle)
    else:
        plt.suptitle('Regression Analysis')
    # Subplot indices preserved from the original (1, cols, rows+1, cols+rows:
    # the expected 1..4 positions for a 2x2 grid).
    _panel(y_0, 1, subtitle_0)
    _panel(y_1, cols, subtitle_1)
    if rows == 2:
        _panel(y_2, rows + 1, subtitle_2)
        _panel(y_3, cols + rows, subtitle_3)
    plt.show()
|
import numpy as np
import matplotlib.pyplot as plt
import glob
import sys
import re
import os
from matplotlib import rcParams
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Meiryo', 'Yu Gothic', 'DejaVu Sans']
# Work inside the benchmark-log directory given on the command line.
os.chdir(sys.argv[1])
# Derive the sequence name from the directory name, e.g.
# "foo_benchmark_log" -> "foo". Renamed from "input" to avoid shadowing
# the Python builtin.
base_name = os.path.basename(sys.argv[1])
base_name = base_name.replace('_benchmark_log', '')
BitDepth_array = ['8bit', '10bit', 'Unspecified']
markers = ['o', 'v', '^', '<', '>', 's', 'D', 'd', 'p', '*', 'h', 'H', '+', 'x', '|', '_', '.', ',', '8', '1', '2', '3', '4']
metric_array = ['PSNR_Y', 'PSNR_Average', 'SSIM_Y', 'SSIM_All', 'VMAF', 'MS-SSIM', 'fps', 'Sec']
size_array = ['', '_bpp']
for size in size_array:
    for BitDepth in BitDepth_array:
        # After skipping CSV column 0, column 0 is bitrate, column 1 is bpp,
        # and the metrics in metric_array order start at index 2.
        row = 2
        for metric in metric_array:
            csv_list = glob.glob('*_(' + BitDepth + ').csv')
            plt.figure(figsize=(12, 8))
            csvFileExist = False
            MNum = 0
            for csv_name in csv_list:
                csvFileExist = True
                # Strip the sequence prefix and bit-depth suffix to get the codec name.
                # Raw strings fix the invalid "\(" escape warnings of the original.
                codec_name = csv_name.replace(base_name, '')
                codec_name = re.sub(r'_(.+)_\(' + re.escape(BitDepth) + r'\)\.csv', r'\1', codec_name)
                data = np.loadtxt(csv_name, comments='#', dtype='float', delimiter=',', skiprows=1, usecols=(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), encoding='utf-8')
                # x axis: bitrate for the plain plots, bpp for the "_bpp" variants.
                x_txt = data[:, 0] if size == '' else data[:, 1]
                y_txt = data[:, row]
                plt.plot(x_txt, y_txt, label=codec_name, marker=markers[MNum])
                MNum = MNum + 1
            if csvFileExist:
                plt.title(base_name)
                plt.xlabel("bpp" if size == '_bpp' else "bitrate(kbps)")
                if metric in ('PSNR_Y', 'PSNR_Average'):
                    plt.ylabel(metric + ' (dB)')
                elif metric == 'Sec':
                    plt.ylabel('Elapsed Time (sec)')
                else:
                    plt.ylabel(metric)
                plt.grid(True, linestyle='dashed')
                # Quality metrics read best with the legend inside the axes;
                # speed metrics get it outside.
                if metric in ('PSNR_Y', 'PSNR_Average', 'SSIM_Y', 'SSIM_All', 'VMAF', 'MS-SSIM'):
                    plt.legend(loc='lower right')
                else:
                    plt.legend(bbox_to_anchor=(1.01, 1), loc='upper left', borderaxespad=0)
                plt.savefig(base_name + '_' + metric + '(' + BitDepth + ')' + size + '_Graph.png', bbox_inches='tight')
            plt.close()
            row = row + 1
|
import sys
from models.instances import Host
from services.config import Configuration
from services.logger import print_orange
from .selectors import Selector
from .connection import DoConnectAndSave
class DiscoverHost(object):
    """Find a working SSH connection path to a host, optionally via a bounce host.

    Tries each configured username in turn and stores the first combination
    for which a connection attempt succeeds.
    """
    def __init__(self, account_obj, bounce=False):
        self.account_obj = account_obj
        # When True, connections are tunnelled through a bounce host.
        self.bounce = bounce
        self.conf = Configuration()
    def get_bounce(self):
        """Locate the bounce host, preferring the host file over the state file."""
        bounce_host = Selector(self.account_obj, filters=[self.conf.bounce_host]).select_host_from_host_file()
        # If no host file, then get it from state file
        if bounce_host.connectionString is None:
            bounce_host = Selector(self.account_obj, filters=[self.conf.bounce_host]).select_host_from_state_file()
            bounce_host = self.discover_bounce(bounce_host)
        return bounce_host
    def get_host(self):
        """Select a host from the state file, discovering credentials if needed."""
        host = Selector(self.account_obj, filters=[self.conf.bounce_host]).select_host_from_state_file()
        if host.connectionString is None:
            host = self.discover_bounce(host)
        return host
    def discover_bounce(self, host: Host) -> Host:
        """Try each configured username until a direct connection succeeds."""
        for user in self.conf.usernames:
            host.connectionString.username = user
            # Prefer the public IP; fall back to the private one.
            host.connectionString.connectionIP = host.privateIp if host.publicIp is None else host.publicIp
            host.connectionString.keyPath = self.conf.ssh_key_path + host.key + '.pem'
            # Means return code = 0, which is a success
            if not DoConnectAndSave(host, self.account_obj).lazy_connect():
                return host
        print_orange('Failed finding a connection path for host, exiting.')
        sys.exit(1)
    def discover_host(self, host: Host, bounce_host=None) -> Host:
        """Try each configured username, tunnelling via *bounce_host* when bouncing.

        BUGFIX: the original used the mutable default ``bounce_host=Host()``,
        which created one shared Host instance at definition time; callers that
        relied on the default now get a fresh instance per call.
        """
        if bounce_host is None:
            bounce_host = Host()
        host.connectionString.keyPath = self.conf.ssh_key_path + host.key + '.pem'
        for user in self.conf.usernames:
            host.connectionString.username = user
            if self.bounce:
                host.connectionString.connectionIP = host.privateIp
                host.connectionString.bounce_host = bounce_host.instanceId
                # Means return code = 0, which is a success
                if not DoConnectAndSave(host, self.account_obj).bounce_lazy_connect(bounce_host):
                    return host
            else:
                host.connectionString.connectionIP = host.privateIp if host.publicIp is None else host.publicIp
                if not DoConnectAndSave(host, self.account_obj).lazy_connect():
                    return host
        print_orange('Failed finding a connection path for host, exiting.')
        sys.exit(1)
|
import numpy as np
def align(array, reader):
    """Reorient a NIfTI voxel array using the header's srow affine rows.

    Parameters
    ----------
    array : numpy.ndarray
        3D voxel data to reorient.
    reader : object
        Header source exposing ``GetMetaData(key)`` for 'srow_x', 'srow_y'
        and 'srow_z' (SimpleITK-style reader — TODO confirm exact type).

    Returns
    -------
    numpy.ndarray
        The array flipped/transposed so axes follow the header orientation.
    """
    nifti_array = array
    # Rotation part of the affine (first 3 values of each srow), rounded to ints.
    alignment_matrix = [reader.GetMetaData('srow_x').split()[:3], reader.GetMetaData('srow_y').split()[:3],
                        reader.GetMetaData('srow_z').split()[:3]]
    for row in range(len(alignment_matrix)):
        for col in range(len(alignment_matrix[0])):
            alignment_matrix[row][col] = int(round(float(alignment_matrix[row][col])))
    # A negative column sum means that axis points the opposite way: flip it.
    flip_list = [sum(x) for x in zip(alignment_matrix[0], alignment_matrix[1], alignment_matrix[2])]
    for position in range(len(flip_list)):
        if flip_list[position] < 0:
            nifti_array = np.flip(nifti_array, axis=position)
    # Signs are handled above; keep only magnitudes for the permutation step.
    for row in range(len(alignment_matrix)):
        for col in range(len(alignment_matrix[0])):
            alignment_matrix[row][col] = abs(alignment_matrix[row][col])
    # Bring each axis onto the diagonal. This single loop replaces three
    # copy-pasted blocks from the original (one per axis) with identical
    # semantics: swap the row holding a 1 in this column up to the diagonal,
    # transpose the data accordingly, then flip the swapped axis.
    for axis in range(3):
        if abs(alignment_matrix[axis][axis]) == 1:
            continue
        transpose_list = [0, 1, 2]
        for index in range(len(alignment_matrix)):
            if alignment_matrix[index][axis] == 1:
                swap_index = index
                break
        transpose_list[swap_index], transpose_list[axis] = transpose_list[axis], transpose_list[swap_index]
        alignment_matrix[swap_index], alignment_matrix[axis] = alignment_matrix[axis], alignment_matrix[swap_index]
        nifti_array = np.transpose(nifti_array, transpose_list)
        nifti_array = np.flip(nifti_array, axis=swap_index)
    return nifti_array
|
# _*_ coding: utf-8 _*_
from zope.interface import Attribute, Interface
__author__ = "Md Nazrul Islam <email2nazrul@gmail.com>"
# NOTE: zope.interface convention — interface methods are declared without ``self``.
class IBaseClass(Interface):
    """Base interface for objects with a finalization step."""
    _finalized = Attribute("Finalized Flag")
    def finalize(context):
        """Finalize this object using *context*; implementations are expected
        to set ``_finalized`` afterwards — confirm against implementations."""
class ICloneable(Interface):
    """Interface for objects that can produce copies of themselves."""
    def clone():
        """Return a copy of this object (depth is implementation-defined)."""
    def __copy__():
        """Hook so :func:`copy.copy` works on implementations."""
class IStorage(Interface):
    """Mapping-like storage contract with update/lock bookkeeping attributes."""
    _last_updated = Attribute("Last Updated")
    _write_locked = Attribute("Write Locked")
    _read_locked = Attribute("Read Locked")
    def get(item):
        """Return the value stored for *item*."""
    def set(item, value):
        """Store *value* under *item*."""
    def insert(item, value):
        """Insert *value* for *item* (presumably for new keys — confirm
        the distinction from ``set`` against implementations)."""
    def delete(item):
        """Remove *item* from the storage."""
    def clear():
        """Remove all items."""
    def exists(item):
        """Return whether *item* is present."""
    def empty():
        """Return whether the storage holds no items."""
    def total():
        """Return the number of stored items."""
class IFhirPrimitiveType(Interface):
    """Contract for FHIR primitive type values."""
    __visit_name__ = Attribute("visit name")
    __regex__ = Attribute("Regex")
    def to_python():
        """Return the value converted to a native Python object."""
    def to_json():
        """Return the value in a JSON-serializable form."""
class IPrimitiveTypeCollection(Interface):
    """Collection of primitive type values."""
    def add(item):
        """Add *item* to the collection."""
    def remove(item=None, index=None):
        """Remove an entry, either by *item* or by *index*."""
class ITypeSpecifier(Interface):
    """Marker interface for type specifiers."""
class ITypeInfoWithElements(Interface):
    """Type information that exposes its child elements."""
    def get_elements():
        """Return the elements of this type."""
class IPathInfoContext(Interface):
    """Context describing a FHIR path: property and type metadata."""
    fhir_release = Attribute("FHIR Release")
    prop_name = Attribute("Property Name")
    prop_original = Attribute("Original propety name")
    type_name = Attribute("Type Name")
    type_class = Attribute("Type Class")
    optional = Attribute("Optional")
    multiple = Attribute("Multiple")
class IModel(Interface):
    """FHIR Model Class"""
# --------------*-----------------
# ´´search.py``
class ISearch(Interface):
    """Marker interface for search services (``search.py``)."""
class ISearchContext(Interface):
    """Marker interface for the state a search runs against."""
class ISearchContextFactory(Interface):
    """Marker interface for factories producing search contexts."""
class IFhirSearch(Interface):
    """Marker interface for FHIR search implementations."""
# --------------*-----------------
# ´´query.py``
class IQuery(IBaseClass):
    """A query bound to a specific FHIR release (``query.py``)."""
    fhir_release = Attribute("FHIR Release Name")
class IQueryBuilder(IBaseClass):
    """Builder that assembles a query against a bound context."""
    context = Attribute("Fhir Query Context")
    def bind(context):
        """Bind *context* to this builder."""
class IQueryResult(Interface):
    """FHIRPath-style collection view over query results."""
    def fetchall():  # lgtm[py/not-named-self]
        """Return all items in the result collection."""
    def single():  # lgtm[py/not-named-self]
        """Will return the single item in the input if there is just one item.
        If the input collection is empty ({ }), the result is empty.
        If there are multiple items, an error is signaled to the evaluation environment.
        This operation is useful for ensuring that an error is returned
        if an assumption about cardinality is violated at run-time."""
    def first():  # lgtm[py/not-named-self]
        """Returns a collection containing only the first item in the input collection.
        This function is equivalent to item(0), so it will return an empty collection
        if the input collection has no items."""
    def last():  # lgtm[py/not-named-self]
        """Returns a collection containing only the last item in the input collection.
        Will return an empty collection if the input collection has no items."""
    def tail():
        """Returns a collection containing all but the first item in the input
        collection. Will return an empty collection
        if the input collection has no items, or only one item."""
    def skip(num: int):  # lgtm[py/not-named-self]
        """Returns a collection containing all but the first num items
        in the input collection. Will return an empty collection
        if there are no items remaining after the indicated number of items have
        been skipped, or if the input collection is empty.
        If num is less than or equal to zero, the input collection
        is simply returned."""
    def take(num: int):  # lgtm[py/not-named-self]
        """Returns a collection containing the first num items in the input collection,
        or less if there are less than num items. If num is less than or equal to 0, or
        if the input collection is empty ({ }), take returns an empty collection."""
    def count():  # lgtm[py/not-named-self]
        """Returns a collection with a single value which is the integer count of
        the number of items in the input collection.
        Returns 0 when the input collection is empty."""
    def empty():  # lgtm[py/not-named-self]
        """Returns true if the input collection is empty ({ }) and false otherwise."""
class IIgnoreModifierCheck(Interface):
    """Marker interface; presumably signals that a search-modifier check
    should be skipped — confirm against usage."""
class IIgnoreNotModifierCheck(IIgnoreModifierCheck):
    """Marker interface for skipping the ``:not`` modifier check specifically
    — confirm against usage."""
|
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
import pytest
import pytablewriter as ptw
class Test_CsvTableWriter_write_table:
    # NOTE(review): despite the class name, this smoke test is parametrized
    # over every text-based table format, not just CSV — confirm intended.
    @pytest.mark.parametrize(
        ["format_name"],
        [[tblfmt.names[0]] for tblfmt in ptw.TableFormat.find_all_attr(ptw.FormatAttr.TEXT)],
    )
    def test_smoke_multi_byte(self, format_name):
        """Smoke test: every text writer must render multi-byte (Japanese) content."""
        writer = ptw.TableWriterFactory.create_from_format_name(
            format_name,
            table_name="生成に関するパターン",
            headers=["パターン名", "概要", "GoF", "Code Complete[1]"],
            value_matrix=[
                ["Abstract Factory", "関連する一連のインスタンスを状況に応じて、適切に生成する方法を提供する。", "Yes", "Yes"],
                ["Builder", "複合化されたインスタンスの生成過程を隠蔽する。", "Yes", "No"],
                ["Factory Method", "実際に生成されるインスタンスに依存しない、インスタンスの生成方法を提供する。", "Yes", "Yes"],
                ["Prototype", "同様のインスタンスを生成するために、原型のインスタンスを複製する。", "Yes", "No"],
                ["Singleton", "あるクラスについて、インスタンスが単一であることを保証する。", "Yes", "Yes"],
            ],
        )
        # A trivially short dump would indicate the table was not rendered.
        assert len(writer.dumps()) > 100
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 23 10:30:14 2020
@author: Ruben Andre Barreiro
"""
# Import NumPy Python's Library
import numpy
# Return Matrix with Planets' Orbital Radius and Orbital Periods
# Note: Each row corresponds to a Planet:
# - The 1st Column corresponds to the Orbital Radius
# - The 2nd Column corresponds to the Orbital Period
def load_planet_data(file_name):
    """Load planets' orbital data from a CSV file.

    The first line is assumed to be a header and is skipped. For each
    remaining line, column 1 (orbital radius) and column 2 (orbital period)
    are parsed as floats.

    Parameters
    ----------
    file_name : str
        Path to the CSV file.

    Returns
    -------
    numpy.ndarray
        Shape (n_planets, 2): column 0 = orbital radius, column 1 = orbital period.
    """
    rows = []
    # Use a context manager so the file handle is closed deterministically
    # (the original opened the file without ever closing it).
    with open(file_name) as planets_file:
        lines = planets_file.readlines()
    for line in lines[1:]:
        parts = line.split(',')
        rows.append((float(parts[1]), float(parts[2])))
    return numpy.array(rows)
# Load the (radius, period) matrix; rows are planets.
planets_data = load_planet_data('../files/planets.csv')
# Exercise #1:
# - Select the rows corresponding to
#   Planets with Orbital Periods greater than 10 years
# (boolean-mask row indexing on column 1, the orbital period)
planets_with_orbital_periods_greater_than_10_years = planets_data[planets_data[:,1]>10,:]
print('Planets with Orbital Periods greater than 10 years:\n')
print(planets_with_orbital_periods_greater_than_10_years)
print('\n')
# Exercise #2:
# - How many planets have Orbital Periods greater than 10 years?
# - Hint: You can use the len function
#   to find the length of an array or the numpy.sum function to find the total sum of array elements,
#   which considers True to be 1 and False to be 0
how_many_planets_with_orbital_periods_greater_than_10_years = numpy.sum(planets_data[:,1]>10)
print('How many Planets with Orbital Periods greater than 10 years:\n')
print(how_many_planets_with_orbital_periods_greater_than_10_years)
print('\n')
# Exercise #3:
# - Select the Orbital Periods of the Planets,
#   whose Orbital Periods in years, are greater than
#   twice the Orbital Radius, in AU
# - Note that you can use algebraical operators,
#   such as sum or multiplication, with array objects
# (elementwise comparison period > 2 * radius, then select column 1)
planets_orbital_periods_greater_than_twice_the_orbital_radius_in_au = planets_data[planets_data[:,1]>(2*planets_data[:,0]),1]
print('Orbital Periods of the Planets, whose Orbital Periods, in years, are greater than twice the Orbital Radius, in AU:\n')
print(planets_orbital_periods_greater_than_twice_the_orbital_radius_in_au)
print('\n')
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2020 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for TimezoneExtraction plugin."""
from datetime import datetime, timedelta
import numpy as np
import pytest
from cf_units import Unit
from iris.coords import CellMethod
from iris.cube import Cube
from improver.metadata.check_datatypes import check_mandatory_standards
from improver.metadata.constants.time_types import TIME_COORDS
from improver.synthetic_data.set_up_test_cubes import (
add_coordinate,
set_up_variable_cube,
)
from improver.utilities.temporal import TimezoneExtraction
def make_input_cube(data_shape, time_bounds=True):
    """Makes a 3D cube (time, y, x) of the described shape, filled with zeroes for use
    in testing."""
    cube = set_up_variable_cube(
        np.zeros(data_shape).astype(np.float32),
        standard_grid_metadata="gl_ens",
        attributes={
            "institution": "unknown",
            "source": "IMPROVER",
            "title": "Unit test",
        },
    )
    # Add a two-point time coordinate: 2017-11-10 04:00 and 05:00 UTC.
    cube = add_coordinate(
        cube,
        [datetime(2017, 11, 10, 4, 0) + timedelta(hours=h) for h in range(2)],
        "time",
        coord_units=TIME_COORDS["time"].units,
        dtype=TIME_COORDS["time"].dtype,
        is_datetime=True,
    )
    if time_bounds:
        # Each time point gets a one-hour bound ending at the point itself.
        cube.coord("time").bounds = np.array(
            [
                [
                    np.around(
                        Unit(TIME_COORDS["time"].units).date2num(
                            datetime(2017, 11, 10, 4, 0) + timedelta(hours=h + b)
                        )
                    )
                    for b in [-1, 0]
                ]
                for h in range(2)
            ],
            dtype=TIME_COORDS["time"].dtype,
        )
    return cube
def make_percentile_cube(data_shape_2d, time_bounds=True):
    """Build the standard test cube and add a (25, 50, 75) percentile coordinate."""
    base_cube = make_input_cube(data_shape_2d, time_bounds=time_bounds)
    return add_coordinate(base_cube, (25, 50, 75), "percentile", "%")
def make_timezone_cube():
    """Makes a timezone cube to use in tests. data=0 where points fall in that
    time-zone. data=1 where they don't."""
    cube = set_up_variable_cube(
        np.zeros((3, 4)).astype(np.float32),
        name="timezone_mask",
        units="1",
        standard_grid_metadata="gl_ens",
        attributes={
            "institution": "unknown",
            "source": "IMPROVER",
            "title": "Unit test",
        },
    )
    # Two UTC offsets (0s and +3600s) form the leading dimension of the mask.
    cube = add_coordinate(
        cube,
        [0, 3600],
        "UTC_offset",
        coord_units="seconds",
        dtype=TIME_COORDS["forecast_period"].dtype,
    )
    true_row = [1, 1, 1, 1]
    false_row = [0, 0, 0, 0]
    # Offset 0 owns the bottom row; offset +3600 owns the top two rows
    # (1 = point excluded from that zone, per the docstring).
    cube.data = np.array(
        [[true_row, true_row, false_row], [false_row, false_row, true_row],],
        dtype=np.int8,
    )
    return cube
def assert_metadata_ok(output_cube):
    """Checks that the meta-data of output_cube are as expected"""
    assert isinstance(output_cube, Cube)
    assert output_cube.dtype == np.float32
    # The time coordinate must span exactly the spatial (lat, lon) dimensions.
    assert list(output_cube.coord_dims("time")) == [
        n for n, in [output_cube.coord_dims(c) for c in ["latitude", "longitude"]]
    ]
    assert output_cube.coord("time").dtype == np.int64
    check_mandatory_standards(output_cube)
@pytest.mark.parametrize("with_cell_method", (True, False))
def test_create_output_cube(with_cell_method):
"""Tests that the create_output_cube method builds a cube with appropriate
meta-data. The Time coord is tested in test_process as it depends on multiple
methods."""
data_shape = [3, 4]
cube = make_input_cube(data_shape)
if with_cell_method:
cell_method = CellMethod("minimum", coords="time")
cube.add_cell_method(cell_method)
local_time = datetime(2017, 11, 9, 12, 0)
plugin = TimezoneExtraction()
plugin.output_data = np.zeros(data_shape, dtype=np.float32)
plugin.time_points = np.full(
data_shape,
fill_value=Unit(TIME_COORDS["time"].units).date2num(
datetime(2017, 11, 10, 4, 0)
),
dtype=np.int64,
)
plugin.time_bounds = None
result = plugin.create_output_cube(cube, local_time)
assert_metadata_ok(result)
assert result.name() == cube.name()
assert result.units == cube.units
result_local_time = result.coord("time_in_local_timezone")
assert [cell.point for cell in result_local_time.cells()] == [local_time]
expected_shape = data_shape
assert result.shape == tuple(expected_shape)
assert result.attributes == cube.attributes
if with_cell_method:
assert result.cell_methods == tuple([cell_method])
@pytest.mark.parametrize("include_time_coord", (True, False))
def test_check_input_cube_dims(include_time_coord):
"""Checks that check_input_cube_dims can differentiate between an input cube
with time, y, x coords and one where time is missing. Also checks that timezone_cube
has been reordered correctly."""
cube = make_input_cube([3, 4])
timezone_cube = make_timezone_cube()
plugin = TimezoneExtraction()
if include_time_coord:
plugin.check_input_cube_dims(cube, timezone_cube)
assert plugin.timezone_cube.coord_dims("UTC_offset") == tuple(
[plugin.timezone_cube.ndim - 1]
)
else:
cube.remove_coord("time")
with pytest.raises(
ValueError, match=r"Expected coords on input_cube: time, y, x "
):
plugin.check_input_cube_dims(cube, timezone_cube)
@pytest.mark.parametrize(
"local_time, expect_success",
((datetime(2017, 11, 10, 5, 0), True), (datetime(2017, 11, 10, 6, 0), False)),
)
def test_check_input_cube_time(local_time, expect_success):
"""Checks that check_input_cube_time can differentiate between arguments that match
expected times and arguments that don't."""
cube = make_input_cube([3, 4])
timezone_cube = make_timezone_cube()
plugin = TimezoneExtraction()
plugin.check_input_cube_dims(cube, timezone_cube)
if expect_success:
plugin.check_input_cube_time(cube, local_time)
else:
with pytest.raises(
ValueError, match=r"Time coord on input cube does not match required times."
):
plugin.check_input_cube_time(cube, local_time)
def test_check_timezones_are_unique_pass():
    """check_timezones_are_unique must accept the standard test timezone cube."""
    plugin = TimezoneExtraction()
    plugin.check_timezones_are_unique(make_timezone_cube())
@pytest.mark.parametrize("offset", (1, -1))
def test_check_timezones_are_unique_fail(offset):
"""Checks that check_timezones_are_unique fails if we break our test cube"""
timezone_cube = make_timezone_cube()
timezone_cube.data[0, 0, 0] += offset
with pytest.raises(
ValueError,
match=r"Timezone cube does not map exactly one time zone to each spatial point",
):
TimezoneExtraction().check_timezones_are_unique(timezone_cube)
@pytest.mark.parametrize("with_percentiles", (True, False))
@pytest.mark.parametrize("input_as_cube", (True, False))
@pytest.mark.parametrize("input_has_time_bounds", (True, False))
def test_process(with_percentiles, input_as_cube, input_has_time_bounds):
"""Checks that the plugin process method returns a cube with expected data and
time coord for our test data"""
data_shape = [3, 4]
data = np.array(
[np.zeros(data_shape, dtype=np.float32), np.ones(data_shape, dtype=np.float32),]
)
expected_data = [[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1]]
if with_percentiles:
cube = make_percentile_cube(data_shape, time_bounds=input_has_time_bounds)
data = np.array([data, data, data])
expected_data = np.array([expected_data, expected_data, expected_data])
else:
cube = make_input_cube(data_shape, time_bounds=input_has_time_bounds)
cube.data = data
local_time = datetime(2017, 11, 10, 5, 0)
timezone_cube = make_timezone_cube()
row1 = [cube.coord("time").units.date2num(datetime(2017, 11, 10, 4, 0))] * 4
row2 = [cube.coord("time").units.date2num(datetime(2017, 11, 10, 5, 0))] * 4
row3 = [cube.coord("time").units.date2num(datetime(2017, 11, 10, 6, 0))] * 4
expected_times = [row1, row2, row3]
expected_bounds = np.array(expected_times).reshape((3, 4, 1)) + [[[-3600, 0]]]
if not input_as_cube:
# Split cube into a list of cubes
cube = [c for c in cube.slices_over("time")]
result = TimezoneExtraction()(cube, timezone_cube, local_time)
assert_metadata_ok(result)
assert np.isclose(result.data, expected_data).all()
assert np.isclose(result.coord("time").points, expected_times).all()
if input_has_time_bounds:
assert np.isclose(result.coord("time").bounds, expected_bounds).all()
else:
assert result.coord("time").bounds is None
def test_bad_dtype():
"""Checks that the plugin raises a useful error if the output are float64"""
cube = make_input_cube([3, 4])
local_time = datetime(2017, 11, 10, 5, 0)
timezone_cube = make_timezone_cube()
timezone_cube.data = timezone_cube.data.astype(np.int32)
with pytest.raises(
TypeError,
match=r"Operation multiply on types \{dtype\(\'.*32\'\), dtype\(\'.*32\'\)\} results in",
):
TimezoneExtraction()(cube, timezone_cube, local_time)
def test_bad_spatial_coords():
"""Checks that the plugin raises a useful error if the longitude coord is shifted by
180 degrees"""
cube = make_input_cube([3, 4])
local_time = datetime(2017, 11, 10, 5, 0)
timezone_cube = make_timezone_cube()
timezone_cube.data = timezone_cube.data.astype(np.int32)
longitude_coord = timezone_cube.coord("longitude")
timezone_cube.replace_coord(longitude_coord.copy(longitude_coord.points + 180))
with pytest.raises(
ValueError,
match=r"Spatial coordinates on input_cube and timezone_cube do not match.",
):
TimezoneExtraction()(cube, timezone_cube, local_time)
|
# See LICENSE for licensing information.
#
# Copyright (c) 2016-2019 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
"""
This is a module that will import the correct DRC/LVS/PEX
module based on what tools are found. It is a layer of indirection
to enable multiple verification tool support.
Each DRC/LVS/PEX tool should implement the functions run_drc, run_lvs, and
run_pex, repsectively. If there is an error, they should abort and report the errors.
If not, OpenRAM will continue as if nothing happened!
"""
import os
import debug
from globals import OPTS,find_exe,get_tool
import sys
debug.info(1,"Initializing verify...")
if not OPTS.check_lvsdrc:
debug.info(1,"LVS/DRC/PEX disabled.")
OPTS.drc_exe = None
OPTS.lvs_exe = None
OPTS.pex_exe = None
else:
debug.info(1, "Finding DRC/LVS/PEX tools.")
OPTS.drc_exe = get_tool("DRC", ["calibre","assura","magic"], OPTS.drc_name)
OPTS.lvs_exe = get_tool("LVS", ["calibre","assura","netgen"], OPTS.lvs_name)
OPTS.pex_exe = get_tool("PEX", ["calibre","magic"], OPTS.pex_name)
if OPTS.drc_exe == None:
from .none import run_drc,print_drc_stats
elif "calibre"==OPTS.drc_exe[0]:
from .calibre import run_drc,print_drc_stats
elif "assura"==OPTS.drc_exe[0]:
from .assura import run_drc,print_drc_stats
elif "magic"==OPTS.drc_exe[0]:
from .magic import run_drc,print_drc_stats
else:
debug.warning("Did not find a supported DRC tool.")
if OPTS.lvs_exe == None:
from .none import run_lvs,print_lvs_stats
elif "calibre"==OPTS.lvs_exe[0]:
from .calibre import run_lvs,print_lvs_stats
elif "assura"==OPTS.lvs_exe[0]:
from .assura import run_lvs,print_lvs_stats
elif "netgen"==OPTS.lvs_exe[0]:
from .magic import run_lvs,print_lvs_stats
else:
debug.warning("Did not find a supported LVS tool.")
if OPTS.pex_exe == None:
from .none import run_pex,print_pex_stats
elif "calibre"==OPTS.pex_exe[0]:
from .calibre import run_pex,print_pex_stats
elif "magic"==OPTS.pex_exe[0]:
from .magic import run_pex,print_pex_stats
else:
debug.warning("Did not find a supported PEX tool.")
|
from checks import AgentCheck
import subprocess
import re
import time
class UnicornCheck(AgentCheck):
    """Datadog agent check reporting unicorn master/worker counts and memory usage."""
    def check(self, instance):
        """Collect worker count, idle count, and per-process memory gauges.

        Raises if the instance config lacks a ``pid_file`` entry.
        """
        if 'pid_file' not in instance:
            raise Exception('Unicorn instance missing "pid_file" value.')
        pid_file = instance['pid_file']
        # Close the pid file deterministically (the original leaked the handle).
        with open(pid_file, 'r') as f:
            master_pid = f.read().strip()
        worker_pids = self.worker_pids()
        self.gauge('unicorn.workers.number', len(worker_pids))
        self.gauge('unicorn.workers.idle_count', self.idle_worker_count(worker_pids))
        for i, pid in enumerate(worker_pids):
            cmd = "ps -o vsz= -o rss= -p %s" % pid
            vms, rss = self.exec_cmd(cmd).split()
            tag = 'worker_id:%d' % i
            # ps reports KiB; scale by 1000, matching the metric's historical unit.
            self.gauge('unicorn.workers.mem.vms', int(vms) * 1000, tags=[tag])
            self.gauge('unicorn.workers.mem.rss', int(rss) * 1000, tags=[tag])
        cmd = "ps -o vsz= -o rss= -p %s" % master_pid
        vms, rss = self.exec_cmd(cmd).split()
        self.gauge('unicorn.master.mem.vms', int(vms) * 1000)
        self.gauge('unicorn.master.mem.rss', int(rss) * 1000)
    def worker_pids(self):
        """Return the pids (as strings) of unicorn_rails worker processes."""
        cmd = "ps aux | grep 'unicorn_rails worker' | grep -v grep | wc -l"
        count = int(self.exec_cmd(cmd))
        pids = []
        # range works on both Python 2 and 3 (the original's xrange is py2-only);
        # raw string avoids the invalid "\[" escape warning.
        for i in range(count):
            cmd = r"ps aux | grep 'unicorn_rails worker\[%d\]' | grep -v grep | awk '{ print $2 }'" % i
            pids.append(self.exec_cmd(cmd))
        return pids
    def idle_worker_count(self, worker_pids):
        """Count workers whose CPU time did not advance over a 100ms window."""
        before_cpu = {}
        for pid in worker_pids:
            before_cpu[pid] = self.cpu_time(pid)
        time.sleep(0.1)
        after_cpu = {}
        for pid in worker_pids:
            after_cpu[pid] = self.cpu_time(pid)
        count = 0
        for pid in worker_pids:
            if after_cpu[pid] == before_cpu[pid]:
                count += 1
        return count
    def cpu_time(self, pid):
        """Total user+system jiffies for *pid* (fields 14 and 15 of /proc/<pid>/stat)."""
        cmd = "cat /proc/%s/stat | awk '{ print $14,$15 }'" % pid
        # Renamed locals: the original's "sys" shadowed the stdlib module name.
        utime, stime = self.exec_cmd(cmd).split()
        return int(utime) + int(stime)
    def exec_cmd(self, cmd):
        """Run *cmd* through the shell and return its stdout (stderr is discarded)."""
        proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = proc.communicate()
        return out
|
from datetime import timedelta
from doctest import DocTestSuite
import pickle
import unittest
from freezegun import freeze_time
from django.contrib.auth import get_user_model
from django.db import IntegrityError
from django.test import RequestFactory
from django.test import TestCase as DjangoTestCase
from django.urls import reverse
from django_otp import DEVICE_ID_SESSION_KEY, oath, util
from django_otp.middleware import OTPMiddleware
from django_otp.models import VerifyNotAllowed
def load_tests(loader, tests, pattern):
    """Collect the discovered tests plus the doctests from util and oath."""
    combined = unittest.TestSuite()
    combined.addTests(tests)
    for module in (util, oath):
        combined.addTest(DocTestSuite(module))
    return combined
class TestCase(DjangoTestCase):
    """
    Utilities for dealing with custom user models.
    """
    @classmethod
    def setUpClass(cls):
        # Resolve the active user model once per test class so tests work
        # regardless of the project's AUTH_USER_MODEL setting.
        super().setUpClass()
        cls.User = get_user_model()
        cls.USERNAME_FIELD = cls.User.USERNAME_FIELD
    def create_user(self, username, password, **kwargs):
        """
        Try to create a user, honoring the custom user model, if any.
        This may raise an exception if the user model is too exotic for our
        purposes.
        """
        return self.User.objects.create_user(username, password=password, **kwargs)
class ThrottlingTestMixin:
    """
    Generic tests for throttled devices.
    Any concrete device implementation that uses throttling should define a
    TestCase subclass that includes this as a base class. This will help verify
    a correct integration of ThrottlingMixin.
    Subclasses are responsible for populating self.device with a device to test
    as well as implementing methods to generate tokens to test with.
    """
    def setUp(self):
        # Concrete subclasses must replace this with a real device instance.
        self.device = None
    def valid_token(self):
        """ Returns a valid token to pass to our device under test. """
        raise NotImplementedError()
    def invalid_token(self):
        """ Returns an invalid token to pass to our device under test. """
        raise NotImplementedError()
    #
    # Tests
    #
    def test_delay_imposed_after_fail(self):
        """After one failed attempt, even a valid token is rejected immediately."""
        verified1 = self.device.verify_token(self.invalid_token())
        self.assertFalse(verified1)
        verified2 = self.device.verify_token(self.valid_token())
        self.assertFalse(verified2)
    def test_delay_after_fail_expires(self):
        """Once the initial delay has elapsed, a valid token verifies again."""
        verified1 = self.device.verify_token(self.invalid_token())
        self.assertFalse(verified1)
        with freeze_time() as frozen_time:
            # With default settings initial delay is 1 second
            frozen_time.tick(delta=timedelta(seconds=1.1))
            verified2 = self.device.verify_token(self.valid_token())
            self.assertTrue(verified2)
    def test_throttling_failure_count(self):
        """Attempts made while already throttled do not raise the failure count."""
        self.assertEqual(self.device.throttling_failure_count, 0)
        for i in range(0, 5):
            self.device.verify_token(self.invalid_token())
            # Only the first attempt will increase throttling_failure_count,
            # the others will all be within 1 second of first
            # and therefore not count as attempts.
        self.assertEqual(self.device.throttling_failure_count, 1)
    def test_verify_is_allowed(self):
        """verify_is_allowed() flips to False after a failure, resets on success."""
        # Initially should be allowed
        verify_is_allowed1, data1 = self.device.verify_is_allowed()
        self.assertEqual(verify_is_allowed1, True)
        self.assertEqual(data1, None)
        # After failure, verify is not allowed
        self.device.verify_token(self.invalid_token())
        verify_is_allowed2, data2 = self.device.verify_is_allowed()
        self.assertEqual(verify_is_allowed2, False)
        self.assertEqual(data2, {'reason': VerifyNotAllowed.N_FAILED_ATTEMPTS,
                                 'failure_count': 1})
        # After a successful attempt, should be allowed again
        with freeze_time() as frozen_time:
            frozen_time.tick(delta=timedelta(seconds=1.1))
            self.device.verify_token(self.valid_token())
            verify_is_allowed3, data3 = self.device.verify_is_allowed()
            self.assertEqual(verify_is_allowed3, True)
            self.assertEqual(data3, None)
class OTPMiddlewareTestCase(TestCase):
    """Tests for OTPMiddleware's handling of the device id in the session."""
    def setUp(self):
        self.factory = RequestFactory()
        try:
            self.alice = self.create_user('alice', 'password')
            self.bob = self.create_user('bob', 'password')
        except IntegrityError:
            self.skipTest("Unable to create a test user.")
        else:
            # Give each user one static device holding a single token equal
            # to their own username.
            for user in [self.alice, self.bob]:
                device = user.staticdevice_set.create()
                device.token_set.create(token=user.get_username())
        self.middleware = OTPMiddleware(lambda r: None)
    def test_verified(self):
        """A valid persistent device id in the session marks the user verified."""
        request = self.factory.get('/')
        request.user = self.alice
        device = self.alice.staticdevice_set.get()
        request.session = {
            DEVICE_ID_SESSION_KEY: device.persistent_id
        }
        self.middleware(request)
        self.assertTrue(request.user.is_verified())
    def test_verified_legacy_device_id(self):
        """The legacy 'module.ClassName/pk' device id format is still accepted."""
        request = self.factory.get('/')
        request.user = self.alice
        device = self.alice.staticdevice_set.get()
        request.session = {
            DEVICE_ID_SESSION_KEY: '{}.{}/{}'.format(
                device.__module__, device.__class__.__name__, device.id
            )
        }
        self.middleware(request)
        self.assertTrue(request.user.is_verified())
    def test_unverified(self):
        """No device id in the session leaves the user unverified."""
        request = self.factory.get('/')
        request.user = self.alice
        request.session = {}
        self.middleware(request)
        self.assertFalse(request.user.is_verified())
    def test_no_device(self):
        """A device id pointing at a nonexistent device does not verify."""
        request = self.factory.get('/')
        request.user = self.alice
        request.session = {
            DEVICE_ID_SESSION_KEY: 'otp_static.staticdevice/0',
        }
        self.middleware(request)
        self.assertFalse(request.user.is_verified())
    def test_no_model(self):
        """A device id referencing an unknown model does not verify."""
        request = self.factory.get('/')
        request.user = self.alice
        request.session = {
            DEVICE_ID_SESSION_KEY: 'otp_bogus.bogusdevice/0',
        }
        self.middleware(request)
        self.assertFalse(request.user.is_verified())
    def test_wrong_user(self):
        """A device id belonging to a different user does not verify."""
        request = self.factory.get('/')
        request.user = self.alice
        device = self.bob.staticdevice_set.get()
        request.session = {
            DEVICE_ID_SESSION_KEY: device.persistent_id
        }
        self.middleware(request)
        self.assertFalse(request.user.is_verified())
    def test_pickling(self):
        """A verified user object must remain picklable."""
        request = self.factory.get('/')
        request.user = self.alice
        device = self.alice.staticdevice_set.get()
        request.session = {
            DEVICE_ID_SESSION_KEY: device.persistent_id
        }
        self.middleware(request)
        # Should not raise an exception.
        pickle.dumps(request.user)
class LoginViewTestCase(TestCase):
    """Tests for the OTP-enabled login views (admin and plain)."""
    def setUp(self):
        try:
            self.alice = self.create_user('alice', 'password')
            self.bob = self.create_user('bob', 'password', is_staff=True)
        except IntegrityError:
            self.skipTest("Unable to create a test user.")
        else:
            # Each user gets a static device with a token equal to their
            # own username.
            for user in [self.alice, self.bob]:
                device = user.staticdevice_set.create()
                device.token_set.create(token=user.get_username())
    def test_admin_login_template(self):
        """Admin login exposes OTP fields only after username/password post."""
        response = self.client.get(reverse('otpadmin:login'))
        self.assertContains(response, 'Username:')
        self.assertContains(response, 'Password:')
        self.assertNotContains(response, 'OTP Device:')
        self.assertContains(response, 'OTP Token:')
        response = self.client.post(reverse('otpadmin:login'), data={
            'username': self.bob.get_username(),
            'password': 'password',
        })
        self.assertContains(response, 'Username:')
        self.assertContains(response, 'Password:')
        self.assertContains(response, 'OTP Device:')
        self.assertContains(response, 'OTP Token:')
        device = self.bob.staticdevice_set.get()
        token = device.token_set.get()
        response = self.client.post(reverse('otpadmin:login'), data={
            'username': self.bob.get_username(),
            'password': 'password',
            'otp_device': device.persistent_id,
            'otp_token': token.token,
            'next': '/',
        })
        self.assertRedirects(response, '/')
    def test_authenticate(self):
        """Posting credentials plus a valid OTP token logs the user in."""
        device = self.alice.staticdevice_set.get()
        token = device.token_set.get()
        params = {
            'username': self.alice.get_username(),
            'password': 'password',
            'otp_device': device.persistent_id,
            'otp_token': token.token,
            'next': '/',
        }
        response = self.client.post('/login/', params)
        self.assertRedirects(response, '/')
        response = self.client.get('/')
        self.assertContains(response, self.alice.get_username())
    def test_verify(self):
        """An already-authenticated user can verify with just the OTP fields."""
        device = self.alice.staticdevice_set.get()
        token = device.token_set.get()
        params = {
            'otp_device': device.persistent_id,
            'otp_token': token.token,
            'next': '/',
        }
        self.client.login(username=self.alice.get_username(), password='password')
        response = self.client.post('/login/', params)
        self.assertRedirects(response, '/')
        response = self.client.get('/')
        self.assertContains(response, self.alice.get_username())
|
#!/usr/bin/env python
# PyOpenCV - A Python wrapper for OpenCV 2.x using Boost.Python and NumPy
# Copyright (c) 2009, Minh-Tri Pham
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# * Neither the name of pyopencv's copyright holders nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# For further inquiries, please contact Minh-Tri Pham at pmtri80@gmail.com.
# ----------------------------------------------------------------------------
import common as _c
import cxcore_hpp_ext as _ext
from cxcore_hpp_ext import *
#=============================================================================
# cxcore.hpp
#=============================================================================
# ---- Size2i ---------------------------------------------------------------
# Generated wrapper glue: graft the shared list-like helpers from `common`
# onto the Boost.Python-generated vector_Size2i, keeping the generated
# constructor reachable as __old_init__.
vector_Size2i.__old_init__ = vector_Size2i.__init__
vector_Size2i.__init__ = _c.__vector__init__
vector_Size2i.create = _c.__vector_create
vector_Size2i.__repr__ = _c.__vector__repr__
vector_Size2i.tolist = _c.__vector_tolist
vector_Size2i.fromlist = classmethod(_c.__vector_fromlist)
# Discover the element type by resizing a throwaway vector and inspecting
# the class of its first element.
_z = vector_Size2i()
_z.resize(1)
vector_Size2i.elem_type = _z[0].__class__
del(_z)
# Append the standard ndarray-view warning to the class docstring.
_str = "\n    Property 'ndarray' provides a numpy.ndarray view on the object.\n    If you create a reference to 'ndarray', you must keep the object unchanged until your reference is deleted, or Python may crash!\n    Alternatively, you could create a reference to 'ndarray' by using 'asndarray(obj)', where 'obj' is an instance of this class.\n    \n    To create an instance of Size2i that shares the same data with an ndarray instance, use: 'asSize2i(a),\n    where 'a' is an ndarray instance. Similarly, to avoid a potential Python crash, you must keep the current instance unchanged until the reference is deleted."
if Size2i.__doc__ is None:
    Size2i.__doc__ = _str
else:
    Size2i.__doc__ += _str
# Delegate indexing, slicing and iteration of Size2i to its ndarray view.
def _Size2i__getitem__(self, *args, **kwds):
    return self.ndarray.__getitem__(*args, **kwds)
Size2i.__getitem__ = _Size2i__getitem__
def _Size2i__setitem__(self, *args, **kwds):
    return self.ndarray.__setitem__(*args, **kwds)
Size2i.__setitem__ = _Size2i__setitem__
def _Size2i__getslice__(self, *args, **kwds):
    return self.ndarray.__getslice__(*args, **kwds)
Size2i.__getslice__ = _Size2i__getslice__
def _Size2i__setslice__(self, *args, **kwds):
    return self.ndarray.__setslice__(*args, **kwds)
Size2i.__setslice__ = _Size2i__setslice__
def _Size2i__iter__(self, *args, **kwds):
    return self.ndarray.__iter__(*args, **kwds)
Size2i.__iter__ = _Size2i__iter__
def _Size2i__repr__(self):
    return "Size2i(width=" + repr(self.width) + ", height=" + repr(self.height) + ")"
Size2i.__repr__ = _Size2i__repr__
# ---- Size2f ---------------------------------------------------------------
# Same pattern as Size2i, for the float variant.
vector_Size2f.__old_init__ = vector_Size2f.__init__
vector_Size2f.__init__ = _c.__vector__init__
vector_Size2f.create = _c.__vector_create
vector_Size2f.__repr__ = _c.__vector__repr__
vector_Size2f.tolist = _c.__vector_tolist
vector_Size2f.fromlist = classmethod(_c.__vector_fromlist)
_z = vector_Size2f()
_z.resize(1)
vector_Size2f.elem_type = _z[0].__class__
del(_z)
_str = "\n    Property 'ndarray' provides a numpy.ndarray view on the object.\n    If you create a reference to 'ndarray', you must keep the object unchanged until your reference is deleted, or Python may crash!\n    Alternatively, you could create a reference to 'ndarray' by using 'asndarray(obj)', where 'obj' is an instance of this class.\n    \n    To create an instance of Size2f that shares the same data with an ndarray instance, use: 'asSize2f(a),\n    where 'a' is an ndarray instance. Similarly, to avoid a potential Python crash, you must keep the current instance unchanged until the reference is deleted."
if Size2f.__doc__ is None:
    Size2f.__doc__ = _str
else:
    Size2f.__doc__ += _str
def _Size2f__getitem__(self, *args, **kwds):
    return self.ndarray.__getitem__(*args, **kwds)
Size2f.__getitem__ = _Size2f__getitem__
def _Size2f__setitem__(self, *args, **kwds):
    return self.ndarray.__setitem__(*args, **kwds)
Size2f.__setitem__ = _Size2f__setitem__
def _Size2f__getslice__(self, *args, **kwds):
    return self.ndarray.__getslice__(*args, **kwds)
Size2f.__getslice__ = _Size2f__getslice__
def _Size2f__setslice__(self, *args, **kwds):
    return self.ndarray.__setslice__(*args, **kwds)
Size2f.__setslice__ = _Size2f__setslice__
def _Size2f__iter__(self, *args, **kwds):
    return self.ndarray.__iter__(*args, **kwds)
Size2f.__iter__ = _Size2f__iter__
def _Size2f__repr__(self):
    return "Size2f(width=" + repr(self.width) + ", height=" + repr(self.height) + ")"
Size2f.__repr__ = _Size2f__repr__
# C++ cv::Size is cv::Size2i; mirror that alias in Python.
Size = Size2i
# ---- Rect -----------------------------------------------------------------
# Generated wrapper glue (same pattern as the Size classes above is used
# throughout this section).
vector_Rect.__old_init__ = vector_Rect.__init__
vector_Rect.__init__ = _c.__vector__init__
vector_Rect.create = _c.__vector_create
vector_Rect.__repr__ = _c.__vector__repr__
vector_Rect.tolist = _c.__vector_tolist
vector_Rect.fromlist = classmethod(_c.__vector_fromlist)
_z = vector_Rect()
_z.resize(1)
vector_Rect.elem_type = _z[0].__class__
del(_z)
_str = "\n    Property 'ndarray' provides a numpy.ndarray view on the object.\n    If you create a reference to 'ndarray', you must keep the object unchanged until your reference is deleted, or Python may crash!\n    Alternatively, you could create a reference to 'ndarray' by using 'asndarray(obj)', where 'obj' is an instance of this class.\n    \n    To create an instance of Rect that shares the same data with an ndarray instance, use: 'asRect(a),\n    where 'a' is an ndarray instance. Similarly, to avoid a potential Python crash, you must keep the current instance unchanged until the reference is deleted."
if Rect.__doc__ is None:
    Rect.__doc__ = _str
else:
    Rect.__doc__ += _str
def _Rect__getitem__(self, *args, **kwds):
    return self.ndarray.__getitem__(*args, **kwds)
Rect.__getitem__ = _Rect__getitem__
def _Rect__setitem__(self, *args, **kwds):
    return self.ndarray.__setitem__(*args, **kwds)
Rect.__setitem__ = _Rect__setitem__
def _Rect__getslice__(self, *args, **kwds):
    return self.ndarray.__getslice__(*args, **kwds)
Rect.__getslice__ = _Rect__getslice__
def _Rect__setslice__(self, *args, **kwds):
    return self.ndarray.__setslice__(*args, **kwds)
Rect.__setslice__ = _Rect__setslice__
def _Rect__iter__(self, *args, **kwds):
    return self.ndarray.__iter__(*args, **kwds)
Rect.__iter__ = _Rect__iter__
def _Rect__repr__(self):
    return "Rect(x=" + repr(self.x) + ", y=" + repr(self.y) + \
        ", width=" + repr(self.width) + ", height=" + repr(self.height) + ")"
Rect.__repr__ = _Rect__repr__
# ---- RotatedRect ----------------------------------------------------------
vector_RotatedRect.__old_init__ = vector_RotatedRect.__init__
vector_RotatedRect.__init__ = _c.__vector__init__
vector_RotatedRect.create = _c.__vector_create
vector_RotatedRect.__repr__ = _c.__vector__repr__
vector_RotatedRect.tolist = _c.__vector_tolist
vector_RotatedRect.fromlist = classmethod(_c.__vector_fromlist)
_z = vector_RotatedRect()
_z.resize(1)
vector_RotatedRect.elem_type = _z[0].__class__
del(_z)
_str = "\n    Property 'ndarray' provides a numpy.ndarray view on the object.\n    If you create a reference to 'ndarray', you must keep the object unchanged until your reference is deleted, or Python may crash!\n    Alternatively, you could create a reference to 'ndarray' by using 'asndarray(obj)', where 'obj' is an instance of this class.\n    \n    To create an instance of RotatedRect that shares the same data with an ndarray instance, use: 'asRotatedRect(a),\n    where 'a' is an ndarray instance. Similarly, to avoid a potential Python crash, you must keep the current instance unchanged until the reference is deleted."
if RotatedRect.__doc__ is None:
    RotatedRect.__doc__ = _str
else:
    RotatedRect.__doc__ += _str
def _RotatedRect__getitem__(self, *args, **kwds):
    return self.ndarray.__getitem__(*args, **kwds)
RotatedRect.__getitem__ = _RotatedRect__getitem__
def _RotatedRect__setitem__(self, *args, **kwds):
    return self.ndarray.__setitem__(*args, **kwds)
RotatedRect.__setitem__ = _RotatedRect__setitem__
def _RotatedRect__getslice__(self, *args, **kwds):
    return self.ndarray.__getslice__(*args, **kwds)
RotatedRect.__getslice__ = _RotatedRect__getslice__
def _RotatedRect__setslice__(self, *args, **kwds):
    return self.ndarray.__setslice__(*args, **kwds)
RotatedRect.__setslice__ = _RotatedRect__setslice__
def _RotatedRect__iter__(self, *args, **kwds):
    return self.ndarray.__iter__(*args, **kwds)
RotatedRect.__iter__ = _RotatedRect__iter__
def _RotatedRect__repr__(self):
    return "RotatedRect(center=" + repr(self.center) + ", size=" + repr(self.size) + \
        ", angle=" + repr(self.angle) + ")"
RotatedRect.__repr__ = _RotatedRect__repr__
# Friendly reprs for small value classes.
def _RNG__repr__(self):
    return "RNG(state=" + repr(self.state) + ")"
RNG.__repr__ = _RNG__repr__
def _TermCriteria__repr__(self):
    return "TermCriteria(type=" + repr(self.type) + ", maxCount=" + repr(self.maxCount) + \
        ", epsilon=" + repr(self.epsilon) + ")"
TermCriteria.__repr__ = _TermCriteria__repr__
# ---- KDTree_Node ----------------------------------------------------------
vector_KDTree_Node.__old_init__ = vector_KDTree_Node.__init__
vector_KDTree_Node.__init__ = _c.__vector__init__
vector_KDTree_Node.create = _c.__vector_create
vector_KDTree_Node.__repr__ = _c.__vector__repr__
vector_KDTree_Node.tolist = _c.__vector_tolist
vector_KDTree_Node.fromlist = classmethod(_c.__vector_fromlist)
_z = vector_KDTree_Node()
_z.resize(1)
vector_KDTree_Node.elem_type = _z[0].__class__
del(_z)
# ---- Seq_int --------------------------------------------------------------
Seq_int.__old_init__ = Seq_int.__init__
def _Seq_int__init__(self, *args, **kwds):
    Seq_int.__old_init__(self, *args, **kwds)
    # Keep a reference to the source container so it outlives this Seq.
    # NOTE(review): kwds.values()[0] is Python-2-only (not subscriptable
    # on Python 3) — consistent with the rest of this generated module.
    if args:
        self.depends = [args[0]]
    elif kwds:
        self.depends = [kwds.values()[0]]
    else:
        self.depends = []
_Seq_int__init__.__doc__ = Seq_int.__old_init__.__doc__
Seq_int.__init__ = _Seq_int__init__
Seq_int.__iter__ = _c.__sd_iter__;
|
#!/usr/bin/python
"""
This python script parses the binary result files of the instrumentation
capability. Each file is composed of a header and a variable number of
records containing a timestamp and some custom recorded content.
"""
import sys
import struct
# struct(1)-style format of the fixed-size file header; field order matches
# HEADER_FIELDS below.
HEADER_FORMAT = "1024sIL1024si40sLLLQQQ"
HEADER_FIELDS = [
    "library path",
    "checksum",
    "library loading base",
    "command line",
    "process id",
    "host name",
    "record count",
    "location count",
    "record offset",
    "start time",
    "one second in units",
    "one record in units"
]
# One location-table entry: numeric id, extra word, fixed 256-byte name.
LOCATION_FORMAT = "II256s"
LOCATION_FIELDS = [
    "location",
    "extra",
    "name"
]
# One record: timestamp, two payload words, and a location id.
RECORD_FORMAT = "LLII"
RECORD_FIELDS = [
    "timestamp",
    "lparam",
    "wparam",
    "location"
]
# Unit conversion factors; overwritten from the header of each parsed file.
SECOND_IN_UNITS = 1 # Default value
RECORD_IN_UNITS = 1 # Default value
def read_instrumentation_file(file_path):
    """Parse one binary instrumentation result file (generator).

    Prints the header and the location table, updates the global unit
    conversion factors, then yields each record as a dict keyed by
    RECORD_FIELDS with the numeric location resolved to its name.
    """
    print "\n\n%s :\n" % file_path
    with open(file_path, "rb") as f:
        # Read instrumentation header
        raw_header = f.read(struct.calcsize(HEADER_FORMAT))
        header = dict(zip(HEADER_FIELDS,
                    struct.unpack(HEADER_FORMAT, raw_header)))
        for k,v in header.iteritems():
            print "%-20s : %s" % (k, str(v).replace("\x00",""))
        # Unit factors come from the header; published globally for the
        # analysis pass.
        global SECOND_IN_UNITS, RECORD_IN_UNITS
        SECOND_IN_UNITS = header["one second in units"]
        RECORD_IN_UNITS = header["one record in units"]
        # Location table: id -> human-readable name (NUL padding stripped).
        locations = {}
        for i in range(header["location count"]):
            raw_location = f.read(struct.calcsize(LOCATION_FORMAT))
            location = dict(zip(LOCATION_FIELDS,
                        struct.unpack(LOCATION_FORMAT, raw_location)))
            locations[location["location"]] = location["name"].replace("\x00","")
        for k,v in locations.iteritems():
            print "%-20i : %s" % (k, v)
        for record in range(header["record count"]):
            # Read a single instrumentation record
            raw_record = f.read(struct.calcsize(RECORD_FORMAT))
            record = dict(zip(RECORD_FIELDS,
                        struct.unpack(RECORD_FORMAT, raw_record)))
            if record["location"] in locations:
                record["location"] = locations[record["location"]]
            else:
                print "ERROR: Unknown location %s !" % record["location"]
                record["location"] = "Unknown (%s)" % record["location"]
            """
            print "Timestamp: %u\tLocation: %s\tWR ID: %u\tLength: %u" % \
                (record["timestamp"], record["location"],
                record["lparam"], record["wparam"])
            """
            yield record
def timestamp_analysis(file_path):
    """Aggregate latency statistics from one instrumentation file.

    Pairs consecutive records sharing a request id ("lparam") and
    accumulates count/sum/min/max of the timestamp delta for each
    (from-location, to-location) interval, grouped by request size,
    then prints averages converted to nanoseconds.
    """
    sizes = {}
    req_ids = {}
    for record in read_instrumentation_file(file_path):
        req_id, req_size = record["lparam"], record["wparam"]
        if not req_id:
            # print "ERROR: record id (\"lparam\") is zero!"
            continue
        # Check if matches a previous request (by ID)
        if req_id in req_ids:
            prev_req = req_ids[req_id]
            req_size = prev_req["wparam"]
            req_last = prev_req["lparam"]
            interval = (prev_req["location"], record["location"])
            duration = record["timestamp"] - prev_req["timestamp"]
            # Produce stats entry for this record
            if req_size not in sizes:
                sizes[req_size] = {}
            locations = sizes[req_size]
            if interval not in locations:
                locations[interval] = \
                    { "count" : 0, "sum" : 0, "max" : 0, "size" : req_size}
            stats = locations[interval]
            # Update stats entry
            stats["count"] += 1;
            stats["sum"] += duration;
            if "min" not in stats:
                locations[interval]["min"] = duration
            else:
                stats["min"] = min(stats["min"], duration)
            stats["max"] = max(stats["max"], duration)
        # set this
        req_ids[req_id] = record
    # Re-arrange by amount of messages (importance?)
    by_count = {}
    for size in sizes:
        for interval, stats in sizes[size].iteritems():
            count = stats["count"]
            if count:
                if count not in by_count:
                    by_count[count] = {}
                by_count[count][interval] = stats
    # Output stats
    order = by_count.keys()
    order.sort()
    for size in order:
        for interval, stats in by_count[size].iteritems():
            # Subtract recording overhead (one record's worth per sample),
            # then convert from native units to nanoseconds.
            stats["sum"] -= stats["count"] * RECORD_IN_UNITS
            stats["average"] = ((0.0 + stats["sum"]) / stats["count"])
            avg = (10 ** 9) * stats["average"] / SECOND_IN_UNITS
            print "from %s\nto %s\naverage %f ns\n%s\n" % \
                (interval[0], interval[1], avg, str(stats))
    print "P.S. averages exclude time it takes to record measurements."
    print "SECOND_IN_UNITS is ", SECOND_IN_UNITS
    print "RECORD_IN_UNITS is ", RECORD_IN_UNITS
if __name__ == "__main__":
    # Each command-line argument is one instrumentation result file.
    for file_path in sys.argv[1:]:
        timestamp_analysis(file_path)
|
# Generated by Django 2.2.13 on 2020-11-18 11:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefine TaskActivity.action choices.

    NOTE(review): presumably the audited_* values are the additions
    relative to migration 0031 — confirm against the previous migration.
    """
    dependencies = [
        ("workflow_handler", "0031_task_correct"),
    ]
    operations = [
        # Only the validation choices change; the column remains a
        # CharField(max_length=128), so no schema change is required.
        migrations.AlterField(
            model_name="taskactivity",
            name="action",
            field=models.CharField(
                choices=[
                    ("created", "created"),
                    ("assigned", "assigned"),
                    ("completed", "completed"),
                    ("saved", "saved"),
                    ("comment", "comment"),
                    ("audited_correct", "audited_correct"),
                    ("audited_incorrect", "audited_incorrect"),
                    ("audited_empty", "audited_empty"),
                ],
                max_length=128,
            ),
        ),
    ]
|
print("done!")
|
import datetime
import hashlib
import json
import operator
import os
import sqlite3
import time
import urllib
import urllib.request
from urllib.parse import urljoin

import requests

import detector
key = ''
class MstdnStream:
    """Mastodon streaming-API client (Server-Sent Events).

    Usage::
        >>> from mstdn import MstdnStream, MstdnStreamListner
        >>> listener = MstdnStreamListner()
        >>> stream = MstdnStream('https://pawoo.net', 'your-access-token', listener)
        >>> stream.local()
    """
    def __init__(self, base_url, access_token, listener):
        self.base_url = base_url
        self.session = requests.Session()
        self.session.headers.update({'Authorization': 'Bearer ' + access_token})
        self.listener = listener
    def local(self):
        """Consume the local-timeline stream, dispatching each event to
        ``listener.on_<event>(data)``. Blocks until the connection drops;
        raises requests.HTTPError on a non-2xx response."""
        url = urljoin(self.base_url, '/api/v1/streaming/public/local')
        resp = self.session.get(url, stream=True)
        resp.raise_for_status()
        event = {}
        for line in resp.iter_lines():
            line = line.decode('utf-8')
            if not line:
                # Blank line terminates one SSE event: dispatch it, then
                # start collecting the next one.
                method_name = "on_{event}".format(event=event['event'])
                f = operator.methodcaller(method_name, event['data'])
                f(self.listener)
                # refresh for the next event
                event = {}
                continue
            if line.startswith(':'):
                # Comment line used by the server as a heartbeat.
                # TODO: Handle heartbeat (e.g. detect dead connections).
                pass
            else:
                # "field: value" — repeated fields are concatenated.
                # (Named `field`, not `key`, to avoid shadowing the
                # module-level access-token variable `key`.)
                field, value = line.split(': ', 1)
                if field in event:
                    event[field] += value
                else:
                    event[field] = value
class MstdnStreamListner:
    """Stream listener that saves face-detected images from statuses.

    For each calendar day a new directory (YYYY-MM-DD) is created that
    holds the downloaded images plus a ``list.db`` SQLite database with
    one row of metadata per saved image.
    """
    def __init__(self):
        # Date of the directory currently being written; a date change
        # triggers a rollover to a fresh directory/database.
        self.old_date = datetime.date.today()
        self.mkdir()
    def on_update(self, data):
        """Handle an 'update' (new status) event from the stream."""
        status = json.loads(data)
        # Roll over to a new directory/database at the date boundary.
        now = datetime.date.today()
        if now != self.old_date:
            self.old_date = now
            self.dbfile.commit()
            self.dbfile.close()
            self.mkdir()
        for image in status['media_attachments']:
            if image['type'] != 'image':
                break  # NOTE: stops at the first non-image attachment
            # URL and local file name (zero-padded running number).
            media_url = image['url']
            root, ext = os.path.splitext(media_url)
            filename = str(self.fileno).zfill(5)
            # Download the image body; failures are best-effort skipped.
            try:
                temp_file = urllib.request.urlopen(media_url).read()
            except Exception:
                print("Download Error")
                continue
            # Skip exact duplicates (by md5 of the raw bytes).
            current_md5 = hashlib.md5(temp_file).hexdigest()
            if current_md5 in self.file_md5:
                print("geted : " + status['account']['acct'] +"-" + filename + ext)
                continue
            # Face detection: returns a perceptual hash and a face box,
            # or None when no face was found.
            current_hash = None
            current_hash, facex, facey, facew, faceh = detector.face_2d(temp_file, status['account']['acct'], filename + ext)
            if current_hash is not None:
                # Skip near-duplicates: Hamming distance between the
                # perceptual hashes below 7 counts as "already seen".
                overlaped = False
                for hash_key in self.file_hash:
                    distance = bin(int(hash_key, 16) ^ int(current_hash, 16)).count('1')
                    if distance < 7:
                        print("geted : " + status['account']['acct'] +"-" + filename + ext)
                        overlaped = True
                        break
                if overlaped != True:
                    # Save the image body.
                    with open(self.base_path + filename + ext, "wb") as out:
                        out.write(temp_file)
                    # Remember both hashes so later duplicates are skipped.
                    self.file_hash.append(current_hash)
                    self.file_md5.append(current_md5)
                    # Collect hashtags, if any.
                    tags = []
                    if "tags" in status:
                        for hashtag in status['tags']:
                            tags.append(hashtag['name'])
                    # Record metadata in a single parameterized INSERT:
                    # acct/url/tags are user-controlled, so placeholders are
                    # required to avoid SQL injection (and this replaces the
                    # previous insert + ten string-built UPDATEs).
                    self.dbfile.execute(
                        "insert into list(filename, username, url, fav, retweet,"
                        " tags, time, facex, facey, facew, faceh)"
                        " values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
                        (
                            filename + ext,
                            status['account']['acct'],
                            status['url'],
                            status['favourites_count'],
                            status['reblogs_count'],
                            str(tags).replace("'", ""),
                            str(datetime.datetime.now()),
                            str(facex),
                            str(facey),
                            str(facew),
                            str(faceh),
                        ),
                    )
                    self.dbfile.commit()
                    print("saved : " + status['account']['acct'] + "-" + filename + ext)
                    if tags != []:
                        print(" tags : " + str(tags))
                    self.fileno += 1
            else:
                print("skiped : " + status['account']['acct'] + "-" + filename + ext)
            temp_file = None
    def mkdir(self):
        """Create today's output directory and (re)initialize per-day state."""
        self.base_path = "./" + self.old_date.isoformat() + "/"
        if not os.path.exists(self.base_path):
            os.mkdir(self.base_path)
        self.fileno = 0
        self.file_hash = []
        self.file_md5 = []
        self.dbfile = sqlite3.connect(self.base_path + "list.db")
        # Idempotent: the table may already exist from an earlier run today.
        self.dbfile.execute(
            "create table if not exists list (filename, username, url, fav,"
            " retweet, tags, time, facex, facey, facew, faceh)")
    def on_notification(self, data):
        pass
    def on_delete(self, data):
        pass
def main():
    """Run the streamer forever, reconnecting one minute after any failure."""
    listener = MstdnStreamListner()
    stream = MstdnStream('https://pawoo.net', key, listener)
    print('Start Streaming!')
    while True:
        try:
            stream.local()
        except KeyboardInterrupt:
            exit()
        except Exception:
            # Narrowed from a bare `except:` so SystemExit (from exit())
            # and other non-errors are not swallowed by the retry loop.
            print('UserStream Error')
            time.sleep(60)
if __name__ == '__main__':
    main()
|
import itertools
from django.core.exceptions import ValidationError
from django.core.validators import URLValidator
def get_bsn():
    """Yield an endless sequence of valid BSN-like numbers.

    The first eight digits come from counting up from 10_000_000; the ninth
    is the 11-test check digit. Candidates whose check digit would be 10
    are skipped.
    """
    for n in itertools.count():
        digits = [int(ch) for ch in str(10 ** 7 + n).zfill(8)]
        weighted_sum = sum((9 - pos) * d for pos, d in enumerate(digits))
        remainder = weighted_sum % 11
        if remainder == 10:
            # No valid single check digit exists for this prefix.
            continue
        yield "".join(str(d) for d in digits + [remainder])
def get_a_nummer():
    """Yield an endless sequence of ten-digit a-nummer strings."""
    for n in itertools.count():
        yield str(10 ** 7 + n).zfill(10)
def is_url(value):
    """Return True if ``value`` is a string that looks like a URL.

    Django's test client produces "testserver" hosts, which URLValidator
    would reject, so those are accepted explicitly. The type check runs
    first: the original order raised TypeError for non-string,
    non-container values and wrongly returned True for containers that
    happened to hold "testserver".
    """
    if not isinstance(value, str):
        return False
    if "testserver" in value:
        return True
    try:
        URLValidator()(value)
    except ValidationError:
        return False
    return True
|
import os
import logging
from google.cloud import storage
logger = logging.getLogger(__name__)
class RemoteGCS:
    """Thin wrapper around a single Google Cloud Storage bucket.

    Remote names are normalized to forward slashes so Windows-style
    paths map cleanly onto GCS object names.
    """
    def __init__(self, bucket='', project=None, credentials=None):
        self._storage_client = storage.Client(
            project=project, credentials=credentials)
        self._bucket = self._storage_client.get_bucket(bucket)
        self._bucket_name = bucket
        self._project = project
    def _get_bucket(self):
        return self._bucket
    def _remote_file_list(self):
        """Return the names of all blobs in the bucket."""
        return [x.name for x in self._get_bucket().list_blobs()]
    def _download_file(self, local_filename, remote_filename):
        """Download one blob to a local path, creating parent directories."""
        bucket = self._get_bucket()
        remote_filename = remote_filename.replace('\\', '/')
        blob = bucket.blob(remote_filename)
        path_to_local_file = os.path.dirname(local_filename)
        if not os.path.exists(path_to_local_file):
            os.makedirs(path_to_local_file)
        with open(local_filename, 'wb') as file_obj:
            logger.debug('Start downloading ' +
                         remote_filename + ' to ' + local_filename)
            blob.download_to_file(file_obj)
            logger.debug('Finished downloading ' + remote_filename)
    def _upload_file(self, local_filename, remote_filename):
        """Upload one local file to a blob."""
        bucket = self._get_bucket()
        remote_filename = remote_filename.replace('\\', '/')
        blob = bucket.blob(remote_filename)
        with open(local_filename, 'rb') as file_obj:
            logger.debug('Start uploading file ' +
                         local_filename + ' to ' + remote_filename)
            blob.upload_from_file(file_obj)
            # Fixed: previously logged 'Start uploading file' again here,
            # duplicating the start message instead of marking completion.
            logger.debug('Finished uploading file ' + local_filename)
    def file_exists(self, filename):
        """Return True if the named blob exists in the bucket."""
        filename = filename.replace('\\', '/')
        return storage.Blob(bucket=self._bucket, name=filename).exists(self._storage_client)
    def _delete_file(self, filename):
        """Delete one blob from the bucket."""
        bucket = self._get_bucket()
        filename = filename.replace('\\', '/')
        blob = bucket.blob(filename)
        blob.delete()
|
# Time: O(r - l)
# Space: O(1)
import math
class Solution(object):
    def abbreviateProduct(self, left, right):
        """
        Return the product of left..right with trailing zeros factored out
        as "e<zeros>", abbreviated to the first/last 5 significant digits
        ("prefix...suffixe<zeros>") when it exceeds 10 digits.
        :type left: int
        :type right: int
        :rtype: str
        """
        PREFIX_LEN = SUFFIX_LEN = 5
        MOD = 10**(PREFIX_LEN+SUFFIX_LEN)
        # curr: running product with trailing zeros stripped, kept truncated
        # to its low 10 digits; zeros: count of trailing zeros stripped.
        curr, zeros = 1, 0
        abbr = False  # becomes True once high digits were truncated away
        for i in xrange(left, right+1):
            curr *= i
            while not curr%10:
                curr //= 10
                zeros += 1
            q, curr = divmod(curr, MOD)
            if q:
                abbr = True
        if not abbr:
            # The zero-stripped product fits in 10 digits: print it whole.
            return "%se%s" % (curr, zeros)
        # Leading digits via the fractional part of sum(log10(i)):
        # 10**decimal lies in [1, 10); scale it up to PREFIX_LEN digits.
        decimal = reduce(lambda x, y: (x+y)%1, (math.log10(i) for i in xrange(left, right+1)))
        prefix = str(int(10**(decimal+(PREFIX_LEN-1))))
        suffix = str(curr % 10**SUFFIX_LEN).zfill(SUFFIX_LEN)
        return "%s...%se%s" % (prefix, suffix, zeros)
|
import re
from pathlib import Path
import git
from .colors import styles
from .defaults import *
# Extracts the branch name from the "HEAD branch: <name>" line printed by
# `git remote show <name>` (re.M so ^/$ anchor per line).
headBranchExtractionRegExp = re.compile("^\\s*HEAD\\s+branch:\\s+(.+)\\s*$", re.M)
def getRemoteDefaultBranch(remote):
    """Return the remote's default branch name via `git remote show`.

    NOTE(review): raises AttributeError if the pattern is absent from the
    command output — confirm the format for the git versions in use.
    """
    # get_remote_ref_states
    return headBranchExtractionRegExp.search(remote.repo.git.remote("show", remote.name)).group(1)
def upgradeLibrary(localPath: Path, gitUri: str, refspec: str = None, progressCallback=None, prefixPath: Path = None):
    """Upgrades a library of Kaitai Struct formats.

    Clones (via init + fetch + hard reset, so a pre-existing directory
    works) or pulls ``gitUri`` into ``localPath``, shallowly, at
    ``refspec`` (defaults to the remote's HEAD branch).
    ``progressCallback`` receives styled status strings; ``prefixPath``
    only shortens the path shown in those messages.
    """
    if progressCallback is None:
        def progressCallback(x):
            return None
    localPath = Path(localPath).absolute()
    r = None
    actName = ""
    if not (localPath.exists() and localPath.is_dir()):
        localPath.mkdir()
    #assert (localPath.exists() and localPath.is_dir())
    # A missing .git dir means this is effectively a fresh clone.
    if not (localPath / ".git").exists():
        actName = "Clon"
        r = git.Repo.init(str(localPath)) # git.Repo.clone disallows to specify a dir, so we workaround with init + pull
        #assert ( (localPath/".git").exists() )
    else:
        actName = "Pull"
        r = git.Repo(str(localPath))
    #progress=print
    #A function (callable) that is called with the progress information.
    #Signature: ``progress(op_code, cur_count, max_count=None, message='')``.
    #origin = r.create_remote('origin', repo.remotes.origin.url)
    # Reuse the existing 'origin' remote if present, else create it.
    try:
        r.remotes["origin"].set_url(gitUri)
        remote = r.remotes["origin"]
    except BaseException:
        remote = r.create_remote("origin", gitUri)
    gargs = [r.remotes["origin"].name]
    if not refspec:
        refspec = getRemoteDefaultBranch(remote)
    gargs.append(refspec)
    # -B: create or reset the local branch of that name.
    r.git.checkout(refspec, B=True)
    #def progressHandler(op_code, cur_count, max_count=None, message=''):
    #	print(op_code, cur_count, max_count, message)
    #	progressCallback(message)
    # NOTE(review): "update-shallow" is passed as a literal dashed key;
    # confirm GitPython forwards it as --update-shallow.
    gkwargs = {
        "depth": 1,
        "force": True,
        "update-shallow": True,
        #"verify-signatures":True,
        #"progress":progressHandler,
        "verbose": True,
    }
    def pathToPrettyString(p: Path) -> str:
        return str(p.relative_to(prefixPath) if prefixPath else p)
    progressCallback(styles["operationName"](actName + "ing") + " " + styles["info"](gitUri) + " to " + styles["info"](pathToPrettyString(localPath)) + " ...")
    # Shallow fetch then hard-reset the work tree onto the fetched tip.
    r.remotes["origin"].fetch(*gargs[1:], **gkwargs)
    r.head.reset(r.remotes["origin"].name + "/" + refspec, index=True, working_tree=True)
    progressCallback("\b" + styles["operationName"](actName + "ed"))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.